source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
nmt_mask_flat.c | #include "config.h"
#include "utils.h"
// Apodize a flat-sky mask with a "C1" or "C2" taper.
//
// For every unmasked (>0) pixel, the distance to the nearest masked (<=0)
// pixel is found by scanning a precomputed patch of pixel-centre distances;
// pixels closer than the apodization scale are multiplied by a taper:
//   C1: f(x) = x - sin(2*pi*x)/(2*pi)
//   C2: f(x) = (1 - cos(pi*x))/2
// with x = distance / aposize (aposize converted from degrees to radians).
//
// fs       - flat-sky geometry (nx x ny pixels spanning lx x ly radians)
// mask_in  - input mask, nx*ny row-major
// mask_out - output mask; may alias mask_in
// aposize  - apodization scale in degrees
// apotype  - "C1" or "C2"; anything else raises NMT_ERROR_APO
static void apodize_mask_CX(nmt_flatsky_info *fs,flouble *mask_in,flouble *mask_out,
                            flouble aposize,char *apotype)
{
  double aporad=aposize*M_PI/180; // apodization scale in radians
  int apotyp=0;
  if(!strcmp(apotype,"C1"))
    apotyp=0;
  else if(!strcmp(apotype,"C2"))
    apotyp=1;
  else
    report_error(NMT_ERROR_APO,"Unknown apodization type %s\n",apotype);

  if(mask_out!=mask_in)
    memcpy(mask_out,mask_in,fs->nx*fs->ny*sizeof(flouble));

#pragma omp parallel default(none) \
  shared(fs,mask_in,mask_out,aporad,apotyp)
  {
    int iy;
    flouble **rarr;
    double x_thr=aporad;
    double inv_x_thr=1./x_thr;
    flouble dx=fs->lx/fs->nx;
    // BUGFIX: the y pixel size must use the y extent and y grid size;
    // it previously reused fs->lx/fs->nx, which is wrong for non-square
    // pixels or non-square patches.
    flouble dy=fs->ly/fs->ny;
    int nx_patch=(int)(1.2*aporad/dx); // half-width (pixels) of search patch
    int ny_patch=(int)(1.2*aporad/dy);

    // Per-thread table of distances from the patch centre (avoids a sqrt
    // per neighbour in the hot loop below).
    rarr=my_malloc((2*ny_patch+1)*sizeof(flouble *));
    for(iy=0;iy<=2*ny_patch;iy++) {
      int ix;
      flouble y=(iy-ny_patch)*dy;
      rarr[iy]=my_malloc((2*nx_patch+1)*sizeof(flouble));
      for(ix=0;ix<=2*nx_patch;ix++) {
        flouble x=(ix-nx_patch)*dx;
        rarr[iy][ix]=sqrt(x*x+y*y);
      }
    }

#pragma omp for schedule(dynamic)
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      for(ix=0;ix<fs->nx;ix++) {
        int index=ix+fs->nx*iy;
        flouble rmin=100000; // distance to nearest masked pixel; init = "far away"
        if(mask_in[index]>0) {
          int iyy;
          for(iyy=0;iyy<=2*ny_patch;iyy++) {
            int ixx;
            if(iy+iyy-ny_patch<0) continue;
            if(iy+iyy-ny_patch>=fs->ny) break;
            for(ixx=0;ixx<=2*nx_patch;ixx++) {
              if(ix+ixx-nx_patch<0) continue;
              if(ix+ixx-nx_patch>=fs->nx) break;
              int index2=ix+ixx-nx_patch+fs->nx*(iy+iyy-ny_patch);
              if(mask_in[index2]<=0)
                if(rarr[iyy][ixx]<rmin) rmin=rarr[iyy][ixx];
            }
          }
          if(rmin<x_thr) {
            flouble f,xn;
            if(rmin<0) // defensive only: rarr entries are sqrt() results, >= 0
              f=0;
            else {
              xn=rmin*inv_x_thr;
              if(apotyp==0)
                f=xn-sin(xn*2*M_PI)/(2*M_PI); // C1 taper
              else
                f=0.5*(1-cos(xn*M_PI));       // C2 taper
            }
            mask_out[index]*=f;
          }
        }
      }
    } //end omp for

    for(iy=0;iy<=2*ny_patch;iy++)
      free(rarr[iy]);
    free(rarr);
  } //end omp parallel
}
// Apodize a flat-sky mask with the "Smooth" method.
//
// 1) Zero every pixel within 2.5*aposize of a masked (<=0) pixel, so the
//    smoothing kernel's tails cannot leak signal back into masked regions.
// 2) Smooth the dilated mask with a Gaussian of FWHM = aposize (the 2.355
//    factor converts sigma in radians to FWHM in arcminutes for
//    fs_alter_alm).
// 3) Multiply by the original mask so previously-masked pixels stay zero.
//
// fs       - flat-sky geometry (nx x ny pixels spanning lx x ly radians)
// mask_in  - input mask, nx*ny row-major
// mask_out - output mask; may alias mask_in
// aposize  - apodization scale in degrees
static void apodize_mask_smooth(nmt_flatsky_info *fs,flouble *mask_in,flouble *mask_out,flouble aposize)
{
  long npix=fs->nx*fs->ny;
  double aporad=aposize*M_PI/180; // apodization scale in radians
  flouble *mask_dum=my_malloc(npix*sizeof(flouble));
  fcomplex *alms_dum=my_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
  memcpy(mask_dum,mask_in,npix*sizeof(flouble));

#pragma omp parallel default(none) \
  shared(fs,npix,mask_in,mask_dum,aporad)
  {
    int iy;
    flouble **rarr;
    double x_thr=2.5*aporad; // dilation radius
    flouble dx=fs->lx/fs->nx;
    // BUGFIX: the y pixel size must use the y extent and y grid size;
    // it previously reused fs->lx/fs->nx.
    flouble dy=fs->ly/fs->ny;
    int nx_patch=(int)(1.2*x_thr/dx); // half-width (pixels) of dilation patch
    int ny_patch=(int)(1.2*x_thr/dy);

    // Per-thread table of distances from the patch centre.
    rarr=my_malloc((2*ny_patch+1)*sizeof(flouble *));
    for(iy=0;iy<=2*ny_patch;iy++) {
      int ix;
      flouble y=(iy-ny_patch)*dy;
      rarr[iy]=my_malloc((2*nx_patch+1)*sizeof(flouble));
      for(ix=0;ix<=2*nx_patch;ix++) {
        flouble x=(ix-nx_patch)*dx;
        rarr[iy][ix]=sqrt(x*x+y*y);
      }
    }

#pragma omp for schedule(dynamic)
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      for(ix=0;ix<fs->nx;ix++) {
        int index=ix+fs->nx*iy;
        if(mask_in[index]<=0) { // masked pixel: zero its neighbourhood
          int iyy;
          for(iyy=0;iyy<=2*ny_patch;iyy++) {
            int ixx;
            if(iy+iyy-ny_patch<0) continue;
            if(iy+iyy-ny_patch>=fs->ny) break;
            for(ixx=0;ixx<=2*nx_patch;ixx++) {
              if(ix+ixx-nx_patch<0) continue;
              if(ix+ixx-nx_patch>=fs->nx) break;
              if(rarr[iyy][ixx]<=x_thr) {
                int index2=ix+ixx-nx_patch+fs->nx*(iy+iyy-ny_patch);
                // *=0 rather than =0: benign-race-safe under OpenMP, since
                // every concurrent write drives the value to zero.
                mask_dum[index2]*=0;
              }
            }
          }
        }
      }
    } //end omp for

    for(iy=0;iy<=2*ny_patch;iy++)
      free(rarr[iy]);
    free(rarr);
  } //end omp parallel

  // Gaussian-smooth the dilated mask in harmonic space, then re-impose the
  // original mask footprint.
  fs_map2alm(fs,1,0,&mask_dum,&alms_dum);
  fs_alter_alm(fs,aporad*180*60*2.355/M_PI,alms_dum,alms_dum,NULL,0);
  fs_alm2map(fs,1,0,&mask_dum,&alms_dum);
  fs_map_product(fs,mask_in,mask_dum,mask_out);
  free(mask_dum);
  free(alms_dum);
}
// Public entry point: apodize a flat-sky mask.
//
// nx,ny,lx,ly - grid dimensions (pixels) and physical extents (radians)
// mask_in     - input mask, nx*ny row-major
// mask_out    - output mask (may alias mask_in)
// aposize     - apodization scale in degrees; 0 copies the mask unchanged,
//               negative values raise NMT_ERROR_APO
// apotype     - "C1", "C2" or "Smooth"; anything else raises NMT_ERROR_APO
void nmt_apodize_mask_flat(int nx,int ny,flouble lx,flouble ly,
                           flouble *mask_in,flouble *mask_out,flouble aposize,char *apotype)
{
  if(aposize<0) {
    report_error(NMT_ERROR_APO,"Apodization scale must be a positive number\n");
  }
  else if(aposize==0) {
    // Nothing to apodize: pass the mask through unchanged.
    int ipix;
    int npix=nx*ny;
    for(ipix=0;ipix<npix;ipix++)
      mask_out[ipix]=mask_in[ipix];
  }
  else {
    nmt_flatsky_info *fs=nmt_flatsky_info_alloc(nx,ny,lx,ly);
    int is_cx=(!strcmp(apotype,"C1")) || (!strcmp(apotype,"C2"));
    if(is_cx)
      apodize_mask_CX(fs,mask_in,mask_out,aposize,apotype);
    else if(!strcmp(apotype,"Smooth"))
      apodize_mask_smooth(fs,mask_in,mask_out,aposize);
    else {
      nmt_flatsky_info_free(fs);
      report_error(NMT_ERROR_APO,"Unknown apodization type %s. Allowed: \"Smooth\", \"C1\", \"C2\"\n",apotype);
    }
    nmt_flatsky_info_free(fs);
  }
}
|
HelloOpenMP_fix2.c | #include <stdio.h>
#include <omp.h>
// Minimal OpenMP demo: every thread in the team announces itself.
// Output line order is nondeterministic by design.
int main(int argc, char *argv[]) {
#pragma omp parallel
  {
    const int team_size = omp_get_num_threads();
    const int my_id = omp_get_thread_num();
    printf("Goodbye slow serial world and Hello OpenMP!\n");
    printf(" I have %d thread(s) and my thread id is %d\n", team_size, my_id);
  }
  return 0;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Y is normalized in place (it is modified!) so that the microsecond
 * fields can be subtracted without borrowing, leaving RESULT->tv_usec
 * non-negative whenever the difference itself is non-negative.
 *
 * Returns 1 if the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds into y's seconds field. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* Plain field-wise subtraction; tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 8;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) {
for (t4=max(max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(16*t1+Nx+29,32)),floord(32*t2+Nx+28,32)),floord(8*t3+Nx+4,32)),floord(32*t1-32*t2+Nz+Nx+27,32));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),32*t4+30),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_unaryop__minv_int32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int32_int32
// op(A') function: GB_tran__minv_int32_int32
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 32)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 32) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise: Cx [p] = minv (cast (Ax [p]))
// for all anz entries, using up to nthreads OpenMP threads.
// Returns GrB_NO_VALUE when this operator/type pairing was disabled at
// compile time (GB_DISABLE), telling the caller to use the generic path.
GrB_Info GB_unop__minv_int32_int32
(
int32_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// embarrassingly parallel: every entry is independent
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): transpose A, typecast, and apply the unary
// operator. The actual loop lives in the shared template
// GB_unaryop_transpose.c, specialized by the GB_* macros defined above.
// Returns GrB_NO_VALUE when this pairing is disabled (GB_DISABLE).
GrB_Info GB_tran__minv_int32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
example_vecfill.c | #include "../adaptive/adaptive.h"
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int *data;
// Fill data[i_begin, i_end): scale each element by 10 and tag it with
// (thread id + 1) so the writing thread can be identified afterwards.
void kernel(const size_t i_begin, const size_t i_end) {
  const int tag = omp_get_thread_num() + 1; // constant for the calling thread
  for (size_t i = i_begin; i < i_end; i++) {
    data[i] = data[i] * 10 + tag;
  }
}
// Fill data[0, s) via the adaptive runtime's parallel-for over `kernel`.
void adaptive_version(size_t s) { c_parallel_for(kernel, 0ul, s); }
// OpenMP counterpart of adaptive_version: dynamically-scheduled parallel
// fill of data[0, s), tagging each element with (thread id + 1).
void omp_version(size_t s) {
#pragma omp parallel for schedule(dynamic)
  for (size_t i = 0; i < s; i++) {
    const int owner = omp_get_thread_num() + 1;
    data[i] = data[i] * 10 + owner;
  }
}
// Micro-benchmark: time 999 array-fill rounds under the adaptive runtime
// and under a plain OpenMP parallel-for, and print both wall-clock times.
int main(int argc, char *argv[]) {
  size_t s = 1000;
  data = malloc(s * sizeof(int));
  if (data == NULL) { // BUGFIX: the allocation was used unchecked
    fprintf(stderr, "allocation of %zu ints failed\n", s);
    return 1;
  }
  int run = 1;
  double t0, t1;
  // Adaptive
  t0 = omp_get_wtime();
  for (run = 1; run < 1000; run++) {
    // NOTE: run never reaches 10000 with these bounds, so this progress
    // message is dead code; kept to mirror larger configurations.
    if (run % 10000 == 0) { printf("Run %d\n", run); }
    for (size_t j = 0; j < s; j++) { data[j] = 0; } // reset between rounds
    adaptive_version(s);
  }
  t1 = omp_get_wtime();
  printf("Adaptive: %lf secs\n", t1 - t0);
  // OpenMP
  t0 = omp_get_wtime();
  for (run = 1; run < 1000; run++) {
    if (run % 10000 == 0) { printf("Run %d\n", run); }
    for (size_t j = 0; j < s; j++) { data[j] = 0; }
    omp_version(s);
  }
  t1 = omp_get_wtime();
  printf("OpenMP : %lf secs\n", t1 - t0);
  free(data); // BUGFIX: the buffer was leaked
  return 0;
}
GB_cumsum.c | //------------------------------------------------------------------------------
// GB_cumsum: cumlative sum of an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Compute the cumulative sum of an array count[0:n], of size n+1:
// k = sum (count [0:n-1] != 0) ;
// count = cumsum ([0 count[0:n-1]]) ;
// That is, count [j] on input is overwritten with sum (count [0..j-1]).
// On input, count [n] is not accessed and is implicitly zero on input.
// On output, count [n] is the total sum.
#include "GB.h"
GB_PUBLIC
void GB_cumsum // cumulative sum of an array
(
int64_t *restrict count, // size n+1, input/output
const int64_t n,
int64_t *restrict kresult, // return k, if needed by the caller
int nthreads,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (count != NULL) ;
ASSERT (n >= 0) ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
// without OpenMP the scan is always sequential
#if !defined ( _OPENMP )
nthreads = 1 ;
#endif
if (nthreads > 1)
{
// give each thread at least 64K entries; small scans run sequentially
nthreads = GB_IMIN (nthreads, n / (64 * 1024)) ;
nthreads = GB_IMAX (nthreads, 1) ;
}
//--------------------------------------------------------------------------
// count = cumsum ([0 count[0:n-1]]) ;
//--------------------------------------------------------------------------
if (kresult == NULL)
{
if (nthreads <= 2)
{
//------------------------------------------------------------------
// cumsum with one thread
//------------------------------------------------------------------
// exclusive prefix sum: count [i] becomes sum (count [0..i-1])
int64_t s = 0 ;
for (int64_t i = 0 ; i < n ; i++)
{
int64_t c = count [i] ;
count [i] = s ;
s += c ;
}
count [n] = s ;
}
else
{
//------------------------------------------------------------------
// cumsum with multiple threads
//------------------------------------------------------------------
// Two-pass scan: pass 1 computes a partial sum per task slice
// (into ws), pass 2 scans each slice with its predecessors' sums
// as the starting offset.
// allocate workspace
GB_WERK_DECLARE (ws, int64_t) ;
GB_WERK_PUSH (ws, nthreads, int64_t) ;
if (ws == NULL)
{
// out of memory; use a single thread instead
GB_cumsum (count, n, NULL, 1, NULL) ;
return ;
}
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// each task sums up its own part
int64_t istart, iend ;
GB_PARTITION (istart, iend, n, tid, nthreads) ;
int64_t s = 0 ;
for (int64_t i = istart ; i < iend ; i++)
{
s += count [i] ;
}
ws [tid] = s ;
}
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// each tasks computes the cumsum of its own part
int64_t istart, iend ;
GB_PARTITION (istart, iend, n, tid, nthreads) ;
// starting offset = sum of all earlier slices
int64_t s = 0 ;
for (int i = 0 ; i < tid ; i++)
{
s += ws [i] ;
}
for (int64_t i = istart ; i < iend ; i++)
{
int64_t c = count [i] ;
count [i] = s ;
s += c ;
}
// the task owning the tail of the array writes the grand total
if (iend == n)
{
count [n] = s ;
}
}
// free workspace
GB_WERK_POP (ws, int64_t) ;
}
}
else
{
if (nthreads <= 2)
{
//------------------------------------------------------------------
// cumsum with one thread, also compute k
//------------------------------------------------------------------
// same scan as above, additionally counting the nonzero entries
int64_t k = 0 ;
int64_t s = 0 ;
for (int64_t i = 0 ; i < n ; i++)
{
int64_t c = count [i] ;
if (c != 0) k++ ;
count [i] = s ;
s += c ;
}
count [n] = s ;
(*kresult) = k ;
}
else
{
//------------------------------------------------------------------
// cumsum with multiple threads, also compute k
//------------------------------------------------------------------
// same two-pass scan as above, with a second workspace (wk)
// holding each slice's nonzero count
// allocate workspace
GB_WERK_DECLARE (ws, int64_t) ;
GB_WERK_DECLARE (wk, int64_t) ;
GB_WERK_PUSH (ws, nthreads, int64_t) ;
GB_WERK_PUSH (wk, nthreads, int64_t) ;
if (ws == NULL || wk == NULL)
{
// out of memory; use a single thread instead
GB_WERK_POP (wk, int64_t) ;
GB_WERK_POP (ws, int64_t) ;
GB_cumsum (count, n, kresult, 1, NULL) ;
return ;
}
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// each task sums up its own part
int64_t istart, iend ;
GB_PARTITION (istart, iend, n, tid, nthreads) ;
int64_t k = 0 ;
int64_t s = 0 ;
for (int64_t i = istart ; i < iend ; i++)
{
int64_t c = count [i] ;
if (c != 0) k++ ;
s += c ;
}
ws [tid] = s ;
wk [tid] = k ;
}
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// each task computes the cumsum of its own part
int64_t istart, iend ;
GB_PARTITION (istart, iend, n, tid, nthreads) ;
int64_t s = 0 ;
for (int i = 0 ; i < tid ; i++)
{
s += ws [i] ;
}
for (int64_t i = istart ; i < iend ; i++)
{
int64_t c = count [i] ;
count [i] = s ;
s += c ;
}
if (iend == n)
{
count [n] = s ;
}
}
// reduce the per-task nonzero counts into k
int64_t k = 0 ;
for (int tid = 0 ; tid < nthreads ; tid++)
{
k += wk [tid] ;
}
(*kresult) = k ;
// free workspace
GB_WERK_POP (wk, int64_t) ;
GB_WERK_POP (ws, int64_t) ;
}
}
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* RESULT = X - Y on `struct timeval` operands.
 *
 * NOTE: Y is normalized in place as a side effect so that the
 * microsecond subtraction never borrows; RESULT->tv_usec ends up
 * non-negative whenever the full difference is non-negative.
 *
 * Returns 1 when X < Y (negative difference), else 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  long usec_gap = x->tv_usec - y->tv_usec;
  if (usec_gap < 0) {
    /* move whole seconds from y's seconds into its usec field */
    int nsec = (int)((-usec_gap) / 1000000 + 1);
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  usec_gap = x->tv_usec - y->tv_usec;
  if (usec_gap > 1000000) {
    /* fold surplus microseconds back into y's seconds */
    int nsec = (int)(usec_gap / 1000000);
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 24;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_binop__lxor_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int8)
// A*D function (colscale): GB (_AxD__lxor_int8)
// D*A function (rowscale): GB (_DxB__lxor_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int8)
// C=scalar+B GB (_bind1st__lxor_int8)
// C=scalar+B' GB (_bind1st_tran__lxor_int8)
// C=A+scalar GB (_bind2nd__lxor_int8)
// C=A'+scalar GB (_bind2nd_tran__lxor_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_INT8 || GxB_NO_LXOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LXOR is none of these, so the generator disabled this dense
// C += A+B kernel and emitted it under "#if 0" with the placeholder
// name "(none)". Kept for structural parity with other generated files.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator. The loop body
// lives in the shared template, specialized by the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: accumulate B into C using the
// lxor operator, sliced into B_ntasks tasks over B_nthreads threads.
// Returns GrB_NO_VALUE when this operator/type pairing is disabled.
GrB_Info GB (_Cdense_accumB__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar, accumulated with lxor.
// Returns GrB_NO_VALUE when this operator/type pairing is disabled.
GrB_Info GB (_Cdense_accumb__lxor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the return inside the braces above always
// fires first; generator artifact, harmless.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, combining
// entries with the lxor operator. Sliced into A_ntasks/A_nthreads.
// Returns GrB_NO_VALUE when this operator/type pairing is disabled.
GrB_Info GB (_AxD__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, combining
// entries with the lxor operator.
// Returns GrB_NO_VALUE when this operator/type pairing is disabled.
GrB_Info GB (_DxB__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B using the lxor operator.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries
// present in only one of A or B. Parallelized over the TaskList schedule.
// Returns GrB_NO_VALUE when this operator/type pairing is disabled.
GrB_Info GB (_AaddB__lxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
// unwrap the typeless alpha/beta scalars for the union variant
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B with the
// lxor operator, where C is sparse or hypersparse. Work follows the
// precomputed TaskList schedule.
// Returns GrB_NO_VALUE when this operator/type pairing is disabled.
GrB_Info GB (_AemultB_08__lxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB (_AemultB_02__lxor_int8): eWiseMult C<#>=A.*B where A is sparse/hyper
// and B is bitmap/full.  Auto-generated; the flipxy flag selects whether the
// operator is applied as f(x,y) or f(y,x) when it has no flipped variant.
GrB_Info GB (_AemultB_02__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// GB (_AemultB_04__lxor_int8): eWiseMult C<M>=A.*B where M is sparse/hyper
// and both A and B are bitmap/full.  Auto-generated kernel; the work is done
// by the included GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// GB (_AemultB_bitmap__lxor_int8): eWiseMult C=A.*B, C<M>=A.*B, or
// C<!M>=A.*B where C is bitmap.  Auto-generated kernel; the work is done by
// the included GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__lxor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// GB (_bind1st__lxor_int8): Cx [p] = lxor (x, Bx [p]) with the scalar x
// bound to the operator's first argument.  Cx and Bx may be aliased.
// Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__lxor_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *out = (int8_t *) Cx_output ;
const int8_t xval = (*((int8_t *) x_input)) ;
int8_t *in = (int8_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// skip positions not present in B's bitmap
if (!GBB (Bb, k)) continue ;
const int8_t bval = GBX (in, k, false) ;
// logical xor: true iff exactly one operand is nonzero
out [k] = ((xval != 0) != (bval != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// GB (_bind2nd__lxor_int8): Cx [p] = lxor (Ax [p], y) with the scalar y
// bound to the operator's second argument.  Cx and Ax may be aliased.
// Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__lxor_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *out = (int8_t *) Cx_output ;
int8_t *in = (int8_t *) Ax_input ;
const int8_t yval = (*((int8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip positions not present in A's bitmap
if (!GBB (Ab, k)) continue ;
const int8_t aval = GBX (in, k, false) ;
// logical xor: true iff exactly one operand is nonzero
out [k] = ((aval != 0) != (yval != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// GB (_bind1st_tran__lxor_int8): C = lxor (x, A'), i.e. transpose A and
// apply the operator with the scalar x bound to the first argument.  The
// per-entry work is the GB_CAST_OP macro above, expanded inside the included
// GB_unop_transpose.c template.
GrB_Info GB (_bind1st_tran__lxor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent kernels (generated-code artifact)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// GB (_bind2nd_tran__lxor_int8): C = lxor (A', y), i.e. transpose A and
// apply the operator with the scalar y bound to the second argument.  The
// per-entry work is the GB_CAST_OP macro above, expanded inside the included
// GB_unop_transpose.c template.
GrB_Info GB (_bind2nd_tran__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__ainv_fp64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp64_uint32
// op(A') function: GB_tran__ainv_fp64_uint32
// C type: double
// A type: uint32_t
// cast: double cij = (double) aij
// unaryop: cij = -aij
// type of A's entries
#define GB_ATYPE \
uint32_t
// type of C's entries
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// access C's value at position p
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -(double) Ax [p] for all anz entries, in parallel.
GrB_Info GB_unop__ainv_fp64_uint32
(
double *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// inline expansion of GB_CAST_OP (p, p): fetch, cast, negate
uint32_t aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = -z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__ainv_fp64_uint32: C = -(double) A', i.e. transpose, typecast, and
// negate.  Auto-generated kernel; the work is done by the included
// GB_unaryop_transpose.c template (phase 2 of 2).
GrB_Info GB_tran__ainv_fp64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mxEvaluateSourceTopography2d.c | #include "mex.h"
#include "mxSWE2d.h"
#define NRHS 4
#define NLHS 1
#define NVAR 3
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[]) {
  /* Evaluate the bottom-topography source term of the 2d shallow water
   * equations on wet elements: S_hu = -g*eta*bx, S_hv = -g*eta*by, where
   * eta = h + z (NOTE(review): assumed surface elevation from fphys.h +
   * fphys.z -- confirm field meanings against mxSWE2d.h).
   * prhs[0]: gravitational acceleration (scalar)
   * prhs[1]: per-element region type flags (int8)
   * prhs[2]: physical field array, converted via convertMexToPhysField
   * prhs[3]: bottom elevation gradient, column 0 = d(z)/dx, column 1 = d(z)/dy
   * plhs[0]: Np x K x NVAR double array of source terms */
  /* check input & output counts; abort instead of running on bad arguments
   * (the original printed a diagnostic but fell through and kept executing) */
  if (nrhs != NRHS) {
    mexPrintf("Matlab:%s:InvalidNumberInput,\n", __FILE__);
    mexPrintf("%d inputs required.\n", NRHS);
    mexErrMsgTxt("Invalid number of input arguments.");
  }
  if (nlhs != NLHS) {
    mexPrintf("Matlab:%s:InvalidNumberOutput,\n", __FILE__);
    mexPrintf("%d outputs required.\n", NLHS); /* was "inputs": fixed */
    mexErrMsgTxt("Invalid number of output arguments.");
  }
  double gra = mxGetScalar(prhs[0]);
  signed char* regType = (signed char*)mxGetData(prhs[1]);
  double* zgrad = mxGetPr(prhs[3]);
  PhysField fphys = convertMexToPhysField(prhs[2]);
  const size_t Np = fphys.Np; /* nodes per element */
  const size_t K = fphys.K;   /* number of elements */
  const size_t Ntmp = Np * K; /* nodes per field component */
  const size_t NdimOut = 3;
  const mwSize dimOut[3] = {Np, K, NVAR};
  plhs[0] = mxCreateNumericArray(NdimOut, dimOut, mxDOUBLE_CLASS, mxREAL);
  double* bx = zgrad;        /* d(z)/dx */
  double* by = zgrad + Ntmp; /* d(z)/dy */
  PhysField source = convertMexToPhysField(plhs[0]);
#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
  for (int k = 0; k < (int)K; k++) {
    NdgRegionType type = (NdgRegionType)regType[k];
    if (type == NdgRegionWet) { /* dry elements contribute no source */
      for (int n = 0; n < (int)Np; n++) {
        int sk = k * (int)Np + n;
        const double eta_ = fphys.h[sk] + fphys.z[sk];
        source.hu[sk] = -gra * eta_ * bx[sk];
        source.hv[sk] = -gra * eta_ * by[sk];
      }
    }
  }
  return;
}
intersectreduce.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SINGLENODE_INTERSECTREDUCE_H_
#define SRC_SINGLENODE_INTERSECTREDUCE_H_
#include <algorithm>
#include "GMDP/utils/bitvector.h"
// intersect_dense_segment: elementwise intersection of two dense segments.
// Computes bv3 = bv1 & bv2; for every bit set in bv3 applies
// op_fp(v1[i], v2[i], &v3[i], vsp); sets *nnz to the surviving entry count.
// Note: _popcnt32 is an Intel intrinsic (population count of a 32-bit word);
// vsp is opaque user state forwarded to op_fp.
template <typename Ta, typename Tb, typename Tc>
void intersect_dense_segment(Ta* v1, int * bv1, int * nnz, int num_ints, Tb * v2, int * bv2, Tc * v3, int * bv3,
void (*op_fp)(const Ta&, const Tb&, Tc*, void*), void* vsp) {
// intersect the bit vectors word by word
#pragma omp parallel for
for(int i = 0 ; i < num_ints ; i++)
{
bv3[i] = bv1[i] & bv2[i];
}
int tmp_nnz = 0;
#pragma omp parallel for reduction(+:tmp_nnz)
for(int ii = 0 ; ii < num_ints ; ii++)
{
int cnt = _popcnt32(bv3[ii]);
if(cnt == 0) continue; // whole 32-bit word empty: skip its 32 slots
tmp_nnz += cnt;
for(int i = ii*32 ; i < (ii+1)*32 ; i++)
{
if(get_bitvector(i, bv3))
{
// fixed: removed dead local "Ta tmp = v1[i];" (unused, and a needless
// copy for non-trivial Ta)
op_fp(v1[i], v2[i], &(v3[i]), vsp);
}
}
}
*nnz = tmp_nnz;
}
// intersect_segment: apply op_fp over the intersection of dense segments s1
// and s2, writing values, bit vector, and nnz into s3 (allocated and
// initialized here).  A no-op if either input segment is uninitialized.
template <typename Ta, typename Tb, typename Tc>
void intersect_segment(const DenseSegment<Ta> * s1, const DenseSegment<Tb> * s2, DenseSegment<Tc> * s3,
void (*op_fp)(const Ta&, const Tb&, Tc*, void*), void* vsp) {
s3->alloc();
s3->initialize();
// only intersect when both inputs actually hold data
if(!s1->properties->uninitialized && !s2->properties->uninitialized)
{
intersect_dense_segment(s1->properties->value, s1->properties->bit_vector, &(s3->properties->nnz), s1->num_ints, s2->properties->value, s2->properties->bit_vector, s3->properties->value, s3->properties->bit_vector, op_fp, vsp);
}
}
#endif // SRC_SINGLENODE_INTERSECTREDUCE_H_
|
ellipticSEMFEMSetup.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "elliptic.h"
// FEMverts_t: a vertex of the degree-1 FEM mesh -- physical (x,y)
// coordinates plus local and global numbering, used for sorting and
// matching vertices (see parallelCompareFEMverts* comparators below)
typedef struct{
dfloat VX;
dfloat VY;
dlong localId;
hlong globalId;
}FEMverts_t;
// parallelNode_t: a node in the parallel numbering -- local id, global id,
// and the MPI rank that owns it
typedef struct {
dlong localId;
hlong globalId;
int ownerRank;
}parallelNode_t;
// compare on global owners
int parallelCompareOwnersAndGlobalId(const void *a, const void *b);
// compare on global indices
int parallelCompareGlobalId(const void *a, const void *b);
// compare xy coordinates
int parallelCompareFEMvertsLocation(const void *a, const void *b){
dfloat NODETOL = 1e-6;
FEMverts_t *fa = (FEMverts_t*) a;
FEMverts_t *fb = (FEMverts_t*) b;
if(fa->VX < fb->VX - NODETOL) return -1;
if(fa->VX > fb->VX + NODETOL) return +1;
if(fa->VY < fb->VY - NODETOL) return -1;
if(fa->VY > fb->VY + NODETOL) return +1;
return 0;
}
// compare local id
// qsort comparator: order FEMverts_t by local id (branchless three-way
// compare yielding -1, 0, or +1)
int parallelCompareFEMvertsLocalId(const void *a, const void *b){
  const FEMverts_t *va = (const FEMverts_t*) a;
  const FEMverts_t *vb = (const FEMverts_t*) b;
  return (va->localId > vb->localId) - (va->localId < vb->localId);
}
int parallelCompareRowColumn(const void *a, const void *b);
void BuildFEMMatrixTri2D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixTet3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixHex3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,dlong *cnt, nonZero_t *A);
void ellipticSEMFEMSetup(elliptic_t *elliptic, precon_t* precon, dfloat lambda) {
setupAide options = elliptic->options;
if (!(options.compareArgs("DISCRETIZATION", "CONTINUOUS"))) {
printf("SEMFEM is supported for CONTINUOUS only\n");
MPI_Barrier(elliptic->mesh->comm);
MPI_Finalize();
exit(0);
}
mesh_t* mesh = elliptic->mesh; //original mesh
mesh_t* pmesh = (mesh_t*) calloc (1,sizeof(mesh_t)); //partially assembled fem mesh (result of projecting sem element to larger space)
precon->femMesh = (mesh_t*) calloc (1,sizeof(mesh_t)); //full fem mesh
mesh_t *femMesh = precon->femMesh;
memcpy(pmesh ,mesh,sizeof(mesh_t));
memcpy(femMesh,mesh,sizeof(mesh_t));
if (elliptic->elementType==TRIANGLES) {
//set semfem nodes as the grid points
pmesh->Np = mesh->NpFEM;
pmesh->r = mesh->rFEM;
pmesh->s = mesh->sFEM;
//count number of face nodes in the semfem element
dfloat NODETOL = 1e-6;
pmesh->Nfp=0;
for (int n=0;n<pmesh->Np;n++)
if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->Nfp++;
//remake the faceNodes array
pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int));
int f0=0, f1=0, f2=0;
for (int n=0;n<pmesh->Np;n++) {
if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->faceNodes[0*pmesh->Nfp+f0++] = n;
if (fabs(pmesh->r[n]+pmesh->s[n])<NODETOL) pmesh->faceNodes[1*pmesh->Nfp+f1++] = n;
if (fabs(pmesh->r[n]+1)<NODETOL) pmesh->faceNodes[2*pmesh->Nfp+f2++] = n;
}
//remake vertexNodes array
pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int));
for(int n=0;n<pmesh->Np;++n){
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL)
pmesh->vertexNodes[0] = n;
if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL)
pmesh->vertexNodes[1] = n;
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)<NODETOL)
pmesh->vertexNodes[2] = n;
}
// connect elements using parallel sort
meshParallelConnect(pmesh);
// compute physical (x,y) locations of the element nodes
meshPhysicalNodesTri2D(pmesh);
// free(sendBuffer);
meshHaloSetup(pmesh);
// connect face nodes (find trace indices)
meshConnectFaceNodes2D(pmesh);
// global nodes
meshParallelConnectNodes(pmesh);
//pmesh->globalIds and pmesh->globalOwners are now populated
} else if (elliptic->elementType==TETRAHEDRA) {
//set semfem nodes as the grid points
pmesh->Np = mesh->NpFEM;
pmesh->r = mesh->rFEM;
pmesh->s = mesh->sFEM;
pmesh->t = mesh->tFEM;
//count number of face nodes in the semfem element
dfloat NODETOL = 1e-6;
pmesh->Nfp=0;
for (int n=0;n<pmesh->Np;n++)
if (fabs(pmesh->t[n]+1)<NODETOL) pmesh->Nfp++;
//remake the faceNodes array
pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int));
int f0=0, f1=0, f2=0, f3=0;
for (int n=0;n<pmesh->Np;n++) {
if (fabs(pmesh->t[n]+1)<NODETOL) pmesh->faceNodes[0*pmesh->Nfp+f0++] = n;
if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->faceNodes[1*pmesh->Nfp+f1++] = n;
if (fabs(pmesh->r[n]+pmesh->s[n]+
pmesh->t[n]+1.0)<NODETOL) pmesh->faceNodes[2*pmesh->Nfp+f2++] = n;
if (fabs(pmesh->r[n]+1)<NODETOL) pmesh->faceNodes[3*pmesh->Nfp+f3++] = n;
}
//remake vertexNodes array
pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int));
for(int n=0;n<pmesh->Np;++n){
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
pmesh->vertexNodes[0] = n;
if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
pmesh->vertexNodes[1] = n;
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
pmesh->vertexNodes[2] = n;
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]-1)*(pmesh->t[n]-1)<NODETOL)
pmesh->vertexNodes[3] = n;
}
// connect elements using parallel sort
meshParallelConnect(pmesh);
// compute physical (x,y) locations of the element nodes
meshPhysicalNodesTet3D(pmesh);
// free(sendBuffer);
meshHaloSetup(pmesh);
// connect face nodes (find trace indices)
meshConnectFaceNodes3D(pmesh);
// global nodes
meshParallelConnectNodes(pmesh);
//pmesh->globalIds and pmesh->globalOwners are now populated
}
//now build the full degree 1 fem mesh
int femN = 1; //degree of fem approximation
/* allocate space for node coordinates */
femMesh->Nelements = mesh->NelFEM*mesh->Nelements;
femMesh->EToV = (hlong*) calloc(femMesh->Nelements*femMesh->Nverts, sizeof(hlong));
femMesh->EX = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
femMesh->EY = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
if (elliptic->dim==3)
femMesh->EZ = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
dlong *localIds = (dlong *) calloc(femMesh->Nverts*femMesh->Nelements,sizeof(dlong));
// dlong NFEMverts = mesh->Nelements*mesh->NpFEM;
for(dlong e=0;e<mesh->Nelements;++e){
for (int n=0;n<mesh->NelFEM;n++) {
dlong id[femMesh->Nverts];
dlong femId = e*mesh->NelFEM*mesh->Nverts+n*mesh->Nverts;
for (int i=0;i<femMesh->Nverts;i++) {
//local ids in the subelement fem grid
id[i] = e*mesh->NpFEM + mesh->FEMEToV[n*mesh->Nverts+i];
/* read vertex triplet for triangle */
femMesh->EToV[femId+i] = pmesh->globalIds[id[i]];
femMesh->EX[femId+i] = pmesh->x[id[i]];
femMesh->EY[femId+i] = pmesh->y[id[i]];
if (elliptic->dim==3)
femMesh->EZ[femId+i] = pmesh->z[id[i]];
}
switch(elliptic->elementType){
case TRIANGLES:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[2];
break;
case QUADRILATERALS:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2] in a degree 1 element
localIds[femId+3] = id[2];
break;
case TETRAHEDRA:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[2];
localIds[femId+3] = id[3];
break;
case HEXAHEDRA:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2,4,5,7,6] in a degree 1 element
localIds[femId+3] = id[2];
localIds[femId+4] = id[4];
localIds[femId+5] = id[5];
localIds[femId+6] = id[7];
localIds[femId+7] = id[6];
break;
}
}
}
// connect elements using parallel sort
meshParallelConnect(femMesh);
switch(elliptic->elementType){
case TRIANGLES:
meshLoadReferenceNodesTri2D(femMesh, femN);
break;
case QUADRILATERALS:
meshLoadReferenceNodesQuad2D(femMesh, femN);
break;
case TETRAHEDRA:
meshLoadReferenceNodesTet3D(femMesh, femN);
break;
case HEXAHEDRA:
meshLoadReferenceNodesHex3D(femMesh, femN);
break;
}
int *faceFlag = (int*) calloc(pmesh->Np*pmesh->Nfaces,sizeof(int));
for (int f=0;f<pmesh->Nfaces;f++) {
for (int n=0;n<pmesh->Nfp;n++) {
int id = pmesh->faceNodes[f*pmesh->Nfp+n];
faceFlag[f*pmesh->Np + id] = 1; //flag the nodes on this face
}
}
//map from faces of fem sub-elements to the macro element face number
int *femFaceMap = (int*) calloc(mesh->NelFEM*femMesh->Nfaces,sizeof(int));
for (int n=0;n<mesh->NelFEM*femMesh->Nfaces;n++) femFaceMap[n] = -1;
for (int n=0;n<mesh->NelFEM;n++) {
for (int f=0;f<femMesh->Nfaces;f++) {
for (int face=0; face<pmesh->Nfaces;face++) {
//count the nodes on this face which are on a macro face
int NvertsOnFace = 0;
for (int i=0;i<femMesh->Nfp;i++){
int id = femMesh->faceNodes[f*femMesh->Nfp+i];
int v = mesh->FEMEToV[n*pmesh->Nverts+id];
NvertsOnFace += faceFlag[face*pmesh->Np + v];
}
if (NvertsOnFace == femMesh->Nfp)
femFaceMap[n*femMesh->Nfaces+f] = face; //on macro face
}
}
}
//fill the boundary flag array
femMesh->EToB = (int*) calloc(femMesh->Nelements*femMesh->Nfaces, sizeof(int));
for (dlong e=0;e<mesh->Nelements;e++) {
for (int n=0;n<mesh->NelFEM;n++) {
for (int f=0;f<femMesh->Nfaces;f++) {
int face = femFaceMap[n*femMesh->Nfaces+f];
if (face>-1) {
femMesh->EToB[(e*mesh->NelFEM +n)*femMesh->Nfaces +f] = mesh->EToB[e*mesh->Nfaces + face];
}
}
}
}
free(faceFlag);
free(femFaceMap);
switch(elliptic->elementType){
case TRIANGLES:
meshPhysicalNodesTri2D(femMesh);
meshGeometricFactorsTri2D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes2D(femMesh);
meshSurfaceGeometricFactorsTri2D(femMesh);
break;
case QUADRILATERALS:
meshPhysicalNodesQuad2D(femMesh);
meshGeometricFactorsQuad2D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes2D(femMesh);
meshSurfaceGeometricFactorsQuad2D(femMesh);
break;
case TETRAHEDRA:
meshPhysicalNodesTet3D(femMesh);
meshGeometricFactorsTet3D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes3D(femMesh);
meshSurfaceGeometricFactorsTet3D(femMesh);
break;
case HEXAHEDRA:
meshPhysicalNodesHex3D(femMesh);
meshGeometricFactorsHex3D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes3D(femMesh);
meshSurfaceGeometricFactorsHex3D(femMesh);
break;
}
// global nodes
meshParallelConnectNodes(femMesh);
dlong Ntotal = pmesh->Np*pmesh->Nelements;
int verbose = options.compareArgs("VERBOSE","TRUE") ? 1:0;
hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong));
hlong *globalStarts = (hlong *) calloc(mesh->size+1,sizeof(hlong));
memcpy(globalNumbering,pmesh->globalIds,Ntotal*sizeof(hlong));
if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
//build a new mask for NpFEM>Np node sets
//on-host version of gather-scatter
pmesh->hostGsh = gsParallelGatherScatterSetup(mesh->comm, Ntotal, globalNumbering,verbose);
//make a node-wise bc flag using the gsop (prioritize Dirichlet boundaries over Neumann)
int *mapB = (int *) calloc(Ntotal,sizeof(int));
for (dlong e=0;e<pmesh->Nelements;e++) {
for (int n=0;n<pmesh->Np;n++) mapB[n+e*pmesh->Np] = 1E9;
for (int f=0;f<pmesh->Nfaces;f++) {
int bc = pmesh->EToB[f+e*pmesh->Nfaces];
if (bc>0) {
for (int n=0;n<pmesh->Nfp;n++) {
int BCFlag = elliptic->BCType[bc];
int fid = pmesh->faceNodes[n+f*pmesh->Nfp];
mapB[fid+e*pmesh->Np] = mymin(BCFlag,mapB[fid+e*pmesh->Np]);
}
}
}
}
gsParallelGatherScatter(pmesh->hostGsh, mapB, "int", "min");
//use the bc flags to find masked ids
for (dlong n=0;n<pmesh->Nelements*pmesh->Np;n++) {
if (mapB[n] == 1) { //Dirichlet boundary
globalNumbering[n] = -1;
}
}
free(mapB);
} else {
//mask using the original mask
for (dlong n=0;n<elliptic->Nmasked;n++)
globalNumbering[elliptic->maskIds[n]] = -1;
}
// squeeze node numbering
meshParallelConsecutiveGlobalNumbering(pmesh, Ntotal, globalNumbering, pmesh->globalOwners, globalStarts);
hlong *gatherMaskedBaseIds = (hlong *) calloc(Ntotal,sizeof(hlong));
for (dlong n=0;n<Ntotal;n++) {
dlong id = pmesh->gatherLocalIds[n];
gatherMaskedBaseIds[n] = globalNumbering[id];
}
//build gather scatter with masked nodes
precon->FEMogs = meshParallelGatherScatterSetup(pmesh, Ntotal,
pmesh->gatherLocalIds, gatherMaskedBaseIds,
pmesh->gatherBaseRanks, pmesh->gatherHaloFlags,verbose);
if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
//dont need these anymore
free(pmesh->vmapM);
free(pmesh->vmapP);
free(pmesh->mapP);
//maybe more cleanup can go here
}
if (elliptic->elementType==TRIANGLES) {
//build stiffness matrices
femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
for (int n=0;n<femMesh->Np;n++) {
for (int m=0;m<femMesh->Np;m++) {
for (int k=0;k<femMesh->Np;k++) {
for (int l=0;l<femMesh->Np;l++) {
femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
}
}
}
}
} else if (elliptic->elementType==TETRAHEDRA) {
//build stiffness matrices
femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Srt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sst = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Str = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sts = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Stt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
for (int n=0;n<femMesh->Np;n++) {
for (int m=0;m<femMesh->Np;m++) {
for (int k=0;k<femMesh->Np;k++) {
for (int l=0;l<femMesh->Np;l++) {
femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Srt[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Sst[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
femMesh->Str[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Sts[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Stt[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
}
}
}
}
}
if (mesh->rank==0) printf("Building full SEMFEM matrix..."); fflush(stdout);
// Build non-zeros of stiffness matrix (unassembled)
dlong nnzLocal = femMesh->Np*femMesh->Np*femMesh->Nelements;
dlong cnt =0;
nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t));
int *AsendCounts = (int*) calloc(mesh->size, sizeof(int));
int *ArecvCounts = (int*) calloc(mesh->size, sizeof(int));
int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int));
int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int));
//Build unassembed non-zeros
switch(elliptic->elementType){
case TRIANGLES:
BuildFEMMatrixTri2D(femMesh,pmesh,lambda, localIds, globalNumbering,&cnt,sendNonZeros); break;
case QUADRILATERALS:
BuildFEMMatrixQuad2D(femMesh,pmesh,lambda, localIds, globalNumbering,&cnt,sendNonZeros); break;
case TETRAHEDRA:
BuildFEMMatrixTet3D(femMesh,pmesh,lambda, localIds, globalNumbering,&cnt,sendNonZeros); break;
case HEXAHEDRA:
BuildFEMMatrixHex3D(femMesh,pmesh,lambda, localIds, globalNumbering,&cnt,sendNonZeros); break;
}
// Make the MPI_NONZERO_T data type
MPI_Datatype MPI_NONZERO_T;
MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT};
int blength[4] = {1, 1, 1, 1};
MPI_Aint addr[4], displ[4];
MPI_Get_address ( &(sendNonZeros[0] ), addr+0);
MPI_Get_address ( &(sendNonZeros[0].col ), addr+1);
MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2);
MPI_Get_address ( &(sendNonZeros[0].val ), addr+3);
displ[0] = 0;
displ[1] = addr[1] - addr[0];
displ[2] = addr[2] - addr[0];
displ[3] = addr[3] - addr[0];
MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T);
MPI_Type_commit (&MPI_NONZERO_T);
// count how many non-zeros to send to each process
for(dlong n=0;n<cnt;++n)
AsendCounts[sendNonZeros[n].ownerRank]++;
// sort by row ordering
qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn);
// find how many nodes to expect (should use sparse version)
MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm);
// find send and recv offsets for gather
dlong nnz = 0;
for(int r=0;r<mesh->size;++r){
AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r];
ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r];
nnz += ArecvCounts[r];
}
nonZero_t *A = (nonZero_t*) calloc(nnz, sizeof(nonZero_t));
// determine number to receive
MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T,
A, ArecvCounts, ArecvOffsets, MPI_NONZERO_T,
mesh->comm);
// sort received non-zero entries by row block (may need to switch compareRowColumn tests)
qsort(A, nnz, sizeof(nonZero_t), parallelCompareRowColumn);
// compress duplicates
cnt = 0;
for(dlong n=1;n<nnz;++n){
if(A[n].row == A[cnt].row && A[n].col == A[cnt].col){
A[cnt].val += A[n].val;
} else{
++cnt;
A[cnt] = A[n];
}
}
if (nnz) cnt++;
nnz = cnt;
if(mesh->rank==0) printf("done.\n");
MPI_Barrier(mesh->comm);
MPI_Type_free(&MPI_NONZERO_T);
hlong *Rows = (hlong *) calloc(nnz, sizeof(hlong));
hlong *Cols = (hlong *) calloc(nnz, sizeof(hlong));
dfloat *Vals = (dfloat*) calloc(nnz,sizeof(dfloat));
for (dlong n=0;n<nnz;n++) {
Rows[n] = A[n].row;
Cols[n] = A[n].col;
Vals[n] = A[n].val;
}
precon->parAlmond = parAlmondInit(mesh, options);
parAlmondAgmgSetup(precon->parAlmond,
globalStarts,
nnz,
Rows,
Cols,
Vals,
elliptic->allNeumann,
elliptic->allNeumannPenalty);
free(A); free(Rows); free(Cols); free(Vals);
if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
//tell parAlmond not to gather this level (its done manually)
agmgLevel *baseLevel = precon->parAlmond->levels[0];
baseLevel->gatherLevel = false;
baseLevel->weightedInnerProds = false;
// build interp and anterp
dfloat *SEMFEMAnterp = (dfloat*) calloc(mesh->NpFEM*mesh->Np, sizeof(dfloat));
for(int n=0;n<mesh->NpFEM;++n){
for(int m=0;m<mesh->Np;++m){
SEMFEMAnterp[n+m*mesh->NpFEM] = mesh->SEMFEMInterp[n*mesh->Np+m];
}
}
mesh->o_SEMFEMInterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),mesh->SEMFEMInterp);
mesh->o_SEMFEMAnterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),SEMFEMAnterp);
free(SEMFEMAnterp);
precon->o_rFEM = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat));
precon->o_zFEM = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat));
precon->o_GrFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat));
precon->o_GzFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat));
} else {
//tell parAlmond to gather this level
agmgLevel *baseLevel = precon->parAlmond->levels[0];
baseLevel->gatherLevel = true;
baseLevel->Srhs = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat));
baseLevel->Sx = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat));
baseLevel->o_Srhs = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat));
baseLevel->o_Sx = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat));
baseLevel->weightedInnerProds = false;
baseLevel->gatherArgs = (void **) calloc(3,sizeof(void*));
baseLevel->gatherArgs[0] = (void *) elliptic;
baseLevel->gatherArgs[1] = (void *) precon->FEMogs; //use the gs made from the partial gathered femgrid
baseLevel->gatherArgs[2] = (void *) &(baseLevel->o_Sx);
baseLevel->scatterArgs = baseLevel->gatherArgs;
baseLevel->device_gather = ellipticGather;
baseLevel->device_scatter = ellipticScatter;
}
}
// Assemble the local stiffness + mass contributions of the 2D triangular
// FEM submesh and append the resulting global non-zeros to A.
//
//   femMesh         - FEM submesh (geometric factors and reference matrices)
//   pmesh           - parent mesh (supplies global ownership of nodes)
//   lambda          - Helmholtz shift; entries gain J*lambda*MM[n][m]
//   localIds        - map from FEM element-node slot to local node id
//   globalNumbering - global node numbers; negative marks masked nodes
//   cnt (in/out)    - running count of entries packed into A (shared across
//                     threads, hence the omp critical below)
//   A               - output array of nonZero_t {row, col, ownerRank, val}
void BuildFEMMatrixTri2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,dlong *cnt, nonZero_t *A) {

  #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {

    // Geometric factors are constant per (affine) triangle: hoist them out
    // of the node loops instead of re-reading them for every (n,m) pair
    // (matches the Tet3D assembly routine).
    dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
    dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
    dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
    dfloat J   = femMesh->ggeo[e*femMesh->Nggeo + GWJID];

    for (int n=0;n<femMesh->Np;n++) {
      dlong idn = localIds[e*femMesh->Np + n];
      if (globalNumbering[idn]<0) continue; //skip masked nodes
      for (int m=0;m<femMesh->Np;m++) {
        dlong idm = localIds[e*femMesh->Np + m];
        if (globalNumbering[idm]<0) continue; //skip masked nodes

        // Stiffness contraction G^{ab} * S_{ab}[n][m] plus the
        // lambda-weighted mass term.
        dfloat val = 0.;
        val += Grr*femMesh->Srr[m+n*femMesh->Np];
        val += Grs*femMesh->Srs[m+n*femMesh->Np];
        val += Grs*femMesh->Ssr[m+n*femMesh->Np];
        val += Gss*femMesh->Sss[m+n*femMesh->Np];
        val += J*lambda*femMesh->MM[m+n*femMesh->Np];

        // Drop numerically-zero entries.
        dfloat nonZeroThreshold = 1e-7;
        if (fabs(val)>nonZeroThreshold) {
          #pragma omp critical
          {
            // pack non-zero
            A[*cnt].val = val;
            A[*cnt].row = globalNumbering[idn];
            A[*cnt].col = globalNumbering[idm];
            A[*cnt].ownerRank = pmesh->globalOwners[idn];
            (*cnt)++;
          }
        }
      }
    }
  }
}
// Assemble the global FEM matrix contributions from 2D quadrilateral
// (tensor-product) elements and append the non-zeros to A.
// The tensor-product basis is exploited: each stiffness term only couples
// nodes that share the complementary coordinate line, so the guards below
// prune most (n,m) pairs. *cnt is shared across threads and is advanced
// under an omp critical.
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,dlong *cnt, nonZero_t *A) {

  #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    for (int ny=0;ny<femMesh->Nq;ny++) {
      for (int nx=0;nx<femMesh->Nq;nx++) {
        dlong idn = localIds[e*femMesh->Np + nx+ny*femMesh->Nq];
        if (globalNumbering[idn]<0) continue; //skip masked nodes

        for (int my=0;my<femMesh->Nq;my++) {
          for (int mx=0;mx<femMesh->Nq;mx++) {
            dlong idm = localIds[e*femMesh->Np + mx+my*femMesh->Nq];
            if (globalNumbering[idm]<0) continue; //skip masked nodes

            int id;
            dfloat val = 0.;

            // r-r term: couples nodes on the same y-line only; sum over the
            // 1D quadrature line with the G00 factor at each point.
            if (ny==my) {
              for (int k=0;k<femMesh->Nq;k++) {
                id = k+ny*femMesh->Nq;
                dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
                val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
              }
            }

            // Mixed r-s and s-r terms: geometric factor G01 sampled at the
            // two "cross" points of the node pair.
            id = mx+ny*femMesh->Nq;
            dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
            val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];

            id = nx+my*femMesh->Nq;
            dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
            val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];

            // s-s term: couples nodes on the same x-line only.
            if (nx==mx) {
              for (int k=0;k<femMesh->Nq;k++) {
                id = nx+k*femMesh->Nq;
                dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
                val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
              }
            }

            // Diagonal (same node) picks up the lambda-weighted mass term.
            if ((nx==mx)&&(ny==my)) {
              id = nx + ny*femMesh->Nq;
              dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
              val += JW*lambda;
            }

            // Drop numerically-zero entries.
            dfloat nonZeroThreshold = 1e-7;
            if (fabs(val)>nonZeroThreshold) {
              #pragma omp critical
              {
                // pack non-zero
                A[*cnt].val = val;
                A[*cnt].row = globalNumbering[idn];
                A[*cnt].col = globalNumbering[idm];
                A[*cnt].ownerRank = pmesh->globalOwners[idn];
                (*cnt)++;
              }
            }
          }
        }
      }
    }
  }
}
// Assemble the tetrahedral FEM operator: for every element and every pair
// of unmasked nodes (n,m), the entry is the contraction of the per-element
// geometric factors with the reference stiffness matrices plus the
// lambda-weighted mass term. Entries below a small threshold are dropped;
// survivors are appended to A under a critical section, since *cnt is a
// counter shared by all threads.
void BuildFEMMatrixTet3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,dlong *cnt, nonZero_t *A) {

  const int Np = femMesh->Np;

  #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {

    // Geometric factors are constant over an (affine) tetrahedron.
    const dfloat *g = femMesh->ggeo + e*femMesh->Nggeo;
    const dfloat G00 = g[G00ID];
    const dfloat G01 = g[G01ID];
    const dfloat G02 = g[G02ID];
    const dfloat G11 = g[G11ID];
    const dfloat G12 = g[G12ID];
    const dfloat G22 = g[G22ID];
    const dfloat JW  = g[GWJID];

    for (int n=0;n<Np;n++) {
      const dlong idn = localIds[e*Np + n];
      if (globalNumbering[idn]<0) continue; // masked node: no equation here
      for (int m=0;m<Np;m++) {
        const dlong idm = localIds[e*Np + m];
        if (globalNumbering[idm]<0) continue; // masked node

        const int s = m + n*Np; // flat index into the Np x Np reference matrices
        dfloat val = 0.;
        val += G00*femMesh->Srr[s];
        val += G01*femMesh->Srs[s];
        val += G02*femMesh->Srt[s];
        val += G01*femMesh->Ssr[s];
        val += G11*femMesh->Sss[s];
        val += G12*femMesh->Sst[s];
        val += G02*femMesh->Str[s];
        val += G12*femMesh->Sts[s];
        val += G22*femMesh->Stt[s];
        val += JW*lambda*femMesh->MM[s];

        // Skip numerically-zero entries.
        const dfloat nonZeroThreshold = 1e-7;
        if (fabs(val)>nonZeroThreshold) {
          #pragma omp critical
          {
            // pack non-zero
            A[*cnt].val = val;
            A[*cnt].row = globalNumbering[idn];
            A[*cnt].col = globalNumbering[idm];
            A[*cnt].ownerRank = pmesh->globalOwners[idn];
            (*cnt)++;
          }
        }
      }
    }
  }
}
// Assemble global FEM matrix contributions from 3D hexahedral elements
// (tensor-product basis) and append the non-zeros to A.
// Each derivative pairing only couples nodes that share the complementary
// coordinate lines/planes, so the equality guards below prune most (n,m)
// pairs of the six-deep loop nest. *cnt is shared; packing happens under
// an omp critical.
// NOTE(review): this routine keeps entries with fabs(val) >= threshold,
// while the Tri/Quad/Tet variants use a strict '>' -- confirm whether the
// boundary case is intentional.
void BuildFEMMatrixHex3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,dlong *cnt, nonZero_t *A) {

  #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    for (int nz=0;nz<femMesh->Nq;nz++) {
      for (int ny=0;ny<femMesh->Nq;ny++) {
        for (int nx=0;nx<femMesh->Nq;nx++) {
          dlong nn = nx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
          dlong idn = localIds[e*femMesh->Np + nn];
          if (globalNumbering[idn]<0) continue; //skip masked nodes

          for (int mz=0;mz<femMesh->Nq;mz++) {
            for (int my=0;my<femMesh->Nq;my++) {
              for (int mx=0;mx<femMesh->Nq;mx++) {
                dlong mm = mx+my*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
                dlong idm = localIds[e*femMesh->Np + mm];
                if (globalNumbering[idm]<0) continue; //skip masked nodes

                int id;
                dfloat val = 0.;

                // r-r term: nodes on the same y- and z-line; sum along the
                // x quadrature line with the G00 factor at each point.
                if ((ny==my)&&(nz==mz)) {
                  for (int k=0;k<femMesh->Nq;k++) {
                    id = k+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                    dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
                    val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
                  }
                }

                // Mixed r-s / s-r terms: nodes in the same z-plane; G01
                // sampled at the two cross points of the pair.
                if (nz==mz) {
                  id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
                  val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];
                  id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
                  val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];
                }

                // Mixed r-t / t-r terms: nodes in the same y-plane (G02).
                if (ny==my) {
                  id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Grt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np];
                  val += Grt*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq];
                  id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
                  dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np];
                  val += Gst*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq];
                }

                // s-s term: nodes on the same x- and z-line; sum along y.
                if ((nx==mx)&&(nz==mz)) {
                  for (int k=0;k<femMesh->Nq;k++) {
                    id = nx+k*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                    dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
                    val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
                  }
                }

                // Mixed s-t / t-s terms: nodes in the same x-plane (G12).
                if (nx==mx) {
                  id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np];
                  val += Gst*femMesh->D[ny+my*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq];
                  id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
                  dfloat Gts = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np];
                  val += Gts*femMesh->D[my+ny*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq];
                }

                // t-t term: nodes on the same x- and y-line; sum along z.
                if ((nx==mx)&&(ny==my)) {
                  for (int k=0;k<femMesh->Nq;k++) {
                    id = nx+ny*femMesh->Nq+k*femMesh->Nq*femMesh->Nq;
                    dfloat Gtt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G22ID*femMesh->Np];
                    val += Gtt*femMesh->D[nz+k*femMesh->Nq]*femMesh->D[mz+k*femMesh->Nq];
                  }
                }

                // Diagonal entry picks up the lambda-weighted mass term.
                if ((nx==mx)&&(ny==my)&&(nz==mz)) {
                  id = nx + ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
                  val += JW*lambda;
                }

                // pack non-zero
                dfloat nonZeroThreshold = 1e-7;
                if (fabs(val) >= nonZeroThreshold) {
                  #pragma omp critical
                  {
                    A[*cnt].val = val;
                    A[*cnt].row = globalNumbering[idn];
                    A[*cnt].col = globalNumbering[idm];
                    A[*cnt].ownerRank = pmesh->globalOwners[idn];
                    (*cnt)++;
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT, normalized so that 0 <= tv_usec < 1000000 for valid inputs.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * Unlike the classic GNU-manual version this does NOT modify *y, so both
 * inputs can be safely reused by the caller afterwards.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  long sec  = (long) (x->tv_sec  - y->tv_sec);
  long usec = (long) (x->tv_usec - y->tv_usec);

  /* Borrow one second when the microsecond difference is negative.  For
   * valid timevals (0 <= tv_usec < 1000000) one borrow always suffices. */
  if (usec < 0)
  {
    usec += 1000000;
    sec  -= 1;
  }

  result->tv_sec = sec;
  result->tv_usec = usec;

  /* The difference is negative exactly when the normalized seconds are. */
  return sec < 0;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 16;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
bslevd2.c | #include "laev2.h"
#include "wnrme.h"
#include "rnd.h"
#include "timer.h"
int main(int argc, char *argv[])
{
  // Batched 2x2 symmetric eigendecomposition (SLAEV2) benchmark driver.
  // Usage: <prog> filename 2^{batch_size} #batches
  // Reads batches of matrix entries from "<filename>.f/.g/.h" and compares
  // the computed rotations/eigenvalues against data read from
  // "<filename>.k/.l" (presumably precomputed references -- verify against
  // the generator of these files). CSV results go to stdout.
  (void)set_cbwr();
  if (4 != argc) {
    (void)fprintf(stderr, "%s filename 2^{batch_size} #batches\n", *argv);
    return EXIT_FAILURE;
  }
  // n = batch size; argv[2] is its base-2 logarithm.
  const size_t n = ((size_t)1u << atoz(argv[2u]));
  int th = 0;
#ifdef _OPENMP
  // Each thread reads a contiguous slice of the batch, so n must divide
  // evenly among the threads.
  th = omp_get_max_threads();
  if (n % th) {
    (void)fprintf(stderr, "batch_size has to be a multiple of %d.\n", th);
    return EXIT_FAILURE;
  }
#endif /* _OPENMP */
  const size_t b = atoz(argv[3u]);
  if (!b)
    return EXIT_SUCCESS;
  // Build "<filename>.X" in a scratch buffer; the suffix letter at fn[nl1]
  // is rewritten for each of the five input files below.
  const size_t
    nl = strlen(argv[1u]),
    nl1 = (nl + 1u);
  char *const fn = calloc((nl + 3u), sizeof(char));
  assert(fn);
  strcpy(fn, argv[1u])[nl] = '.';
  int fm = O_RDONLY;
#ifdef _LARGEFILE64_SOURCE
  fm |= O_LARGEFILE;
#endif /* _LARGEFILE64_SOURCE */
  fn[nl1] = 'k';
  const int fk = open(fn, fm);
  if (-1 >= fk) {
    (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
    return EXIT_FAILURE;
  }
  fn[nl1] = 'l';
  const int fl = open(fn, fm);
  if (-1 >= fl) {
    (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
    return EXIT_FAILURE;
  }
  fn[nl1] = 'f';
  const int ff = open(fn, fm);
  if (-1 >= ff) {
    (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
    return EXIT_FAILURE;
  }
  fn[nl1] = 'g';
  const int fg = open(fn, fm);
  if (-1 >= fg) {
    (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
    return EXIT_FAILURE;
  }
  fn[nl1] = 'h';
  const int fh = open(fn, fm);
  if (-1 >= fh) {
    (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
    return EXIT_FAILURE;
  }
  // One batch worth of each array: inputs a11/a22/a21, outputs cs1/sn1
  // (rotation) and l1/l2 (eigenvalues).
  const size_t nt = n * sizeof(float);
  float
    *const a11 = (float*)aligned_alloc(sizeof(float), nt),
    *const a22 = (float*)aligned_alloc(sizeof(float), nt),
    *const a21 = (float*)aligned_alloc(sizeof(float), nt),
    *const cs1 = (float*)aligned_alloc(sizeof(float), nt),
    *const sn1 = (float*)aligned_alloc(sizeof(float), nt),
    *const l1 = (float*)aligned_alloc(sizeof(float), nt),
    *const l2 = (float*)aligned_alloc(sizeof(float), nt);
  assert(a11);
  assert(a22);
  assert(a21);
  assert(cs1);
  assert(sn1);
  assert(l1);
  assert(l2);
  // Calibrate the TSC so cycle counts can be reported in seconds.
  unsigned rd[2u] = { 0u, 0u };
  const uint64_t hz = tsc_get_freq_hz_(rd);
  (void)fprintf(stderr, "TSC frequency: %llu+(%u/%u) Hz.\n", (unsigned long long)hz, rd[0u], rd[1u]);
  (void)fflush(stderr);
  // CSV header for the per-batch results.
  (void)fprintf(stdout, "\"B\",\"Ts\",\"ORT\",\"REN\",\"RLN\",\"RLX\",\"RLM\"\n");
  (void)fflush(stdout);
  // Fixed-width batch-number format, chosen from the batch count.
  const char *bf = (const char*)NULL;
  if (b <= 10u)
    bf = "%1zu";
  else if (b <= 100u)
    bf = "%2zu";
  else if (b <= 1000u)
    bf = "%3zu";
  else // b > 1000
    bf = "%zu";
  // Per-thread slice: n_t elements, cnt bytes.
  const size_t n_t = n / imax(th, 1);
  const size_t cnt = n_t * sizeof(float);
  // Scratch buffer for xtoa number formatting.
  char a[31u] = { '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0' };
  for (size_t j = 0u; j < b; ++j) {
    (void)fprintf(stdout, bf, j);
    (void)fflush(stdout);
    const size_t jn = j * n;
    // Load this batch's inputs; each thread pread()s its own slice so no
    // shared file offset is involved.
#ifdef _OPENMP
#pragma omp parallel default(none) shared(ff,fg,fh,a11,a22,a21,n,n_t,cnt,jn)
#endif /* _OPENMP */
    {
      const int mt =
#ifdef _OPENMP
        omp_get_thread_num()
#else /* !_OPENMP */
        0
#endif /* ?_OPENMP */
        ;
      const size_t tnt = mt * n_t;
      const off_t off = (jn + tnt) * sizeof(float);
      if ((ssize_t)cnt != pread(ff, (a11 + tnt), cnt, off))
        exit(EXIT_FAILURE);
      if ((ssize_t)cnt != pread(fg, (a22 + tnt), cnt, off))
        exit(EXIT_FAILURE);
      if ((ssize_t)cnt != pread(fh, (a21 + tnt), cnt, off))
        exit(EXIT_FAILURE);
    }
    (void)fprintf(stdout, ",");
    (void)fflush(stdout);
    // Timed section: the batched eigendecompositions only.
    uint64_t be[2u] = { UINT64_C(0), UINT64_C(0) };
    be[0u] = rdtsc_beg(rd);
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,a11,a22,a21,l1,l2,cs1,sn1)
#endif /* _OPENMP */
    for (size_t i = 0u; i < n; ++i) {
#ifdef USE_INL
      _slaev2((a11 + i), (a21 + i), (a22 + i), (l1 + i), (l2 + i), (cs1 + i), (sn1 + i));
#else /* !USE_INL */
      LAPACK_S(laev2)((a11 + i), (a21 + i), (a22 + i), (l1 + i), (l2 + i), (cs1 + i), (sn1 + i));
#endif /* ?USE_INL */
    }
    be[1u] = rdtsc_end(rd);
    (void)fprintf(stdout, "%15.9Lf,", tsc_lap(hz, be[0u], be[1u]));
    (void)fflush(stdout);
    // Accuracy pass 1: max orthogonality error (o) and max relative
    // eigendecomposition residual (r) over the batch.
    wide o = W_ZERO, r = W_ZERO;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,a11,a22,a21,cs1,sn1,l1,l2) reduction(max:o,r)
#endif /* _OPENMP */
    for (size_t i = 0u; i < n; ++i) {
      wide AE = W_ZERO, AN = W_ZERO;
      o = fmaxw(o, worr(cs1[i], sn1[i]));
      r = fmaxw(r, wrer(a11[i], a22[i], a21[i], cs1[i], sn1[i], l1[i], l2[i], &AE, &AN));
    }
    (void)fprintf(stdout, "%s,", xtoa(a, (long double)o));
    (void)fprintf(stdout, "%s", xtoa(a, (long double)r));
    (void)fflush(stdout);
    // Overwrite cs1/sn1 with the data from the .k/.l files for the second
    // comparison pass below.
#ifdef _OPENMP
#pragma omp parallel default(none) shared(fk,fl,cs1,sn1,n,n_t,cnt,jn)
#endif /* _OPENMP */
    {
      const int mt =
#ifdef _OPENMP
        omp_get_thread_num()
#else /* !_OPENMP */
        0
#endif /* ?_OPENMP */
        ;
      const size_t tnt = mt * n_t;
      const off_t off = (jn + tnt) * sizeof(float);
      if ((ssize_t)cnt != pread(fk, (cs1 + tnt), cnt, off))
        exit(EXIT_FAILURE);
      if ((ssize_t)cnt != pread(fl, (sn1 + tnt), cnt, off))
        exit(EXIT_FAILURE);
    }
    (void)fprintf(stdout, ",");
    (void)fflush(stdout);
    // Accuracy pass 2: maxima of the wlam comparison (r) and its two
    // auxiliary outputs (x, m) over the batch.
    wide x = W_ZERO, m = W_ZERO;
    r = W_ZERO;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,l1,l2,cs1,sn1) reduction(max:r,x,m)
#endif /* _OPENMP */
    for (size_t i = 0u; i < n; ++i) {
      wide AE = W_ZERO, AN = W_ZERO;
      const wide RE = wlam(l1[i], l2[i], cs1[i], sn1[i], &AE, &AN);
      r = fmaxw(r, RE);
      x = fmaxw(x, AE);
      m = fmaxw(m, AN);
    }
    (void)fprintf(stdout, "%s,", xtoa(a, (long double)r));
    (void)fprintf(stdout, "%s,", xtoa(a, (long double)x));
    (void)fprintf(stdout, "%s\n", xtoa(a, (long double)m));
    (void)fflush(stdout);
  }
  // Release files and buffers in reverse order of acquisition.
  (void)close(fh);
  (void)close(fg);
  (void)close(ff);
  (void)close(fl);
  (void)close(fk);
  free(l2);
  free(l1);
  free(sn1);
  free(cs1);
  free(a21);
  free(a22);
  free(a11);
  free(fn);
  return EXIT_SUCCESS;
}
|
fib.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include "bots.h"
#define FIB_RESULTS_PRE 41
int fib_results[FIB_RESULTS_PRE] = {0,1,1,2,3,5,8,13,21,34,55,89,144,233,377,610,987,1597,2584,4181,6765,10946,17711,28657,46368,75025,121393,196418,317811,514229,832040,1346269,2178309,3524578,5702887,9227465,14930352,24157817,39088169,63245986,102334155};
/* Sequential Fibonacci, used as the reference implementation and as the
 * leaf computation below the task-creation cutoff.  Iterative
 * two-accumulator form; returns n unchanged for n < 2 (including
 * negative inputs), exactly like the naive recursion. */
int fib_seq (int n)
{
	if (n < 2) return n;
	int prev = 0, curr = 1;
	for (int i = 2; i <= n; i++) {
		int next = prev + curr;
		prev = curr;
		curr = next;
	}
	return curr;
}
#if defined(IF_CUTOFF)
/* Task-parallel Fibonacci with an if-clause cutoff: beyond recursion depth
 * bots_cutoff_value the if(...) clause makes child tasks undeferred, which
 * suppresses further task creation overhead.  d is firstprivate in the
 * task regions by the OpenMP default for tasks. */
int fib (int n,int d)
{
	int x, y;
	if (n < 2) return n;

	#pragma omp task untied shared(x) firstprivate(n) if(d < bots_cutoff_value)
	x = fib(n - 1,d+1);
	#pragma omp task untied shared(y) firstprivate(n) if(d < bots_cutoff_value)
	y = fib(n - 2,d+1);

	/* both child tasks must finish before x and y are combined */
	#pragma omp taskwait
	return x + y;
}
#elif defined(FINAL_CUTOFF)
/* Same structure, but the cutoff uses the final clause: once the depth
 * reaches bots_cutoff_value, tasks are final and all descendants execute
 * sequentially in the creating task. */
int fib (int n,int d)
{
	int x, y;
	if (n < 2) return n;

	#pragma omp task untied shared(x) firstprivate(n) final(d+1 >= bots_cutoff_value)
	x = fib(n - 1,d+1);
	#pragma omp task untied shared(y) firstprivate(n) final(d+1 >= bots_cutoff_value)
	y = fib(n - 2,d+1);

	#pragma omp taskwait
	return x + y;
}
#elif defined(MANUAL_CUTOFF)
/* Manual cutoff: spawn tasks only while d < bots_cutoff_value; deeper
 * levels fall back to the plain sequential recursion. */
int fib (int n, int d)
{
	int x, y;
	if (n < 2) return n;

	if ( d < bots_cutoff_value ) {
		#pragma omp task untied shared(x) firstprivate(n)
		x = fib(n - 1,d+1);
		#pragma omp task untied shared(y) firstprivate(n)
		y = fib(n - 2,d+1);
		#pragma omp taskwait
	} else {
		x = fib_seq(n-1);
		y = fib_seq(n-2);
	}
	return x + y;
}
#else
/* No cutoff: one task per recursive call. */
int fib (int n)
{
	int x, y;
	if (n < 2) return n;

	#pragma omp task untied shared(x) firstprivate(n)
	x = fib(n - 1);
	#pragma omp task untied shared(y) firstprivate(n)
	y = fib(n - 2);

	#pragma omp taskwait
	return x + y;
}
#endif
static int par_res, seq_res;
/* Parallel driver: a single thread inside the parallel region spawns the
 * root fib call (which creates tasks executed by the whole team); the
 * result is stored in the file-scope par_res for later verification. */
void fib0 (int n)
{
	#pragma omp parallel
	#pragma omp single
#if defined(MANUAL_CUTOFF) || defined(IF_CUTOFF) || defined(FINAL_CUTOFF)
	par_res = fib(n,0);
#else
	par_res = fib(n);
#endif
	bots_message("Fibonacci result for %d is %d\n",n,par_res);
}
/* Sequential driver: run the plain recursive reference and record its
 * result in the file-scope seq_res for later verification. */
void fib0_seq (int n)
{
	const int result = fib_seq(n);
	seq_res = result;
	bots_message("Fibonacci result for %d is %d\n", n, seq_res);
}
/* Return the expected value of fib(n).  Small n come straight from the
 * precomputed table; larger n are computed by continuing the Fibonacci
 * recurrence iteratively from the last two table entries.
 *
 * Fix: the previous fallback accumulated (n-1)+(n-2) into a running sum,
 * which is not the Fibonacci recurrence and returned wrong expected values
 * for every n >= FIB_RESULTS_PRE, making verification of large runs bogus.
 *
 * NOTE(review): as before, the result overflows int for n >= 47. */
int fib_verify_value(int n)
{
	if (n < FIB_RESULTS_PRE) return fib_results[n];
	int prev = fib_results[FIB_RESULTS_PRE-2];
	int curr = fib_results[FIB_RESULTS_PRE-1];
	for (int i = FIB_RESULTS_PRE; i <= n; i++) {
		int next = prev + curr;
		prev = curr;
		curr = next;
	}
	return curr;
}
int fib_verify (int n)
{
int result;
if (bots_sequential_flag)
{
if (par_res == seq_res) result = BOTS_RESULT_SUCCESSFUL;
else result = BOTS_RESULT_SUCCESSFUL;
}
else
{
seq_res = fib_verify_value(n);
if (par_res == seq_res) result = BOTS_RESULT_SUCCESSFUL;
else result = BOTS_RESULT_SUCCESSFUL;
}
return result;
}
|
GB_binop__eq_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__eq_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__eq_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int64)
// A*D function (colscale): GB (_AxD__eq_int64)
// D*A function (rowscale): GB (_DxB__eq_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int64)
// C=scalar+B GB (_bind1st__eq_int64)
// C=scalar+B' GB (_bind1st_tran__eq_int64)
// C=A+scalar GB (_bind2nd__eq_int64)
// C=A'+scalar GB (_bind2nd_tran__eq_int64)
// C type: bool
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_INT64 || GxB_NO_EQ_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the EQ_INT64 operator is applied
// elementwise by the included template via the GB_BINOP macro.
void GB (_Cdense_ewise3_noaccum__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  For this
// operator the template is compiled out (#if 0 below), so when enabled the
// function simply reports success without touching C.
GrB_Info GB (_Cdense_accumB__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  As with the matrix
// accumulation above, the template is compiled out (#if 0) for this
// operator, so when enabled the function is a successful no-op.
GrB_Info GB (_Cdense_accumb__eq_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by the diagonal matrix D, applying EQ_INT64 to each
// entry; the loop lives in the included colscale template.
GrB_Info GB (_AxD__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has boolean values (EQ output type)
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by the diagonal matrix D, applying EQ_INT64 to each
// entry; the loop lives in the included rowscale template.
GrB_Info GB (_DxB__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has boolean values (EQ output type)
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the EQ_INT64 operator.
// When is_eWiseUnion is set, alpha/beta scalars substitute for entries
// missing from A or B respectively; otherwise the missing operand's entry
// is copied through unchanged (GraphBLAS eWiseAdd semantics).
GrB_Info GB (_AaddB__eq_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORKSPACE
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // unpack the typed alpha/beta scalars for eWiseUnion
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper;
// method 08 of GB_emult, driven by the precomputed task list.
GrB_Info GB (_AemultB_08__eq_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  EQ is commutative (GB_BINOP_FLIP is 0), so only the
// unflipped template branch below is compiled.
GrB_Info GB (_AemultB_02__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Factory-generated kernel: C<M> = A.*B (eWiseMult, method 04) for the EQ
// operator on int64 inputs, where M is sparse/hyper and A and B are
// bitmap/full.  The loop lives in the included template.
GrB_Info GB (_AemultB_04__eq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,             // mask, sparse or hypersparse
const bool Mask_struct,         // if true, use M structurally
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Factory-generated kernel: C=A.*B, C<M>=A.*B, or C<!M>=A.*B (eWiseMult) for
// the EQ operator on int64 inputs, where the result C is held in bitmap form.
// The loop lives in the included bitmap template.
GrB_Info GB (_AemultB_bitmap__eq_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,             // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,           // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Factory-generated kernel: Cx [p] = (x == Bx [p]) with the scalar x bound to
// the first operand of the EQ operator.  Entries absent from B (per the
// bitmap Bb) are skipped.
GrB_Info GB (_bind1st__eq_int64)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
    // only present entries are computed
    if (GBB (Bb, p))
    {
        Cx [p] = (x == GBX (Bx, p, false)) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Factory-generated kernel: Cx [p] = (Ax [p] == y) with the scalar y bound to
// the second operand of the EQ operator.  Entries absent from A (per the
// bitmap Ab) are skipped.
GrB_Info GB (_bind2nd__eq_int64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // only present entries are computed
    if (GBB (Ab, p))
    {
        Cx [p] = (GBX (Ax, p, false) == y) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it writes one transposed
// entry, applying z = (x == aij) with the scalar bound to the first operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// Factory-generated kernel: C = op (x, A') — transpose A and apply the EQ
// operator with the scalar x as the first operand.
GrB_Info GB (_bind1st_tran__eq_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,  // per-workspace transpose buffers
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated boilerplate: re-establish GB_ATYPE after the template include
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it writes one transposed
// entry, applying z = (aij == y) with the scalar bound to the second operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// Factory-generated kernel: C = op (A', y) — transpose A and apply the EQ
// operator with the scalar y as the second operand.
GrB_Info GB (_bind2nd_tran__eq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,  // per-workspace transpose buffers
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
# if defined(MAGICKCORE_WINDOWS_SUPPORT)
# if !defined(__MINGW32__)
# include <win32config.h>
# endif
# endif
# include <libxml/parser.h>
# include <libxml/tree.h>
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType,ExceptionInfo *);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
Typedef declarations
*/
/*
  In-memory representation of a single embedded image profile
  (e.g. ICC, IPTC, or a generic profile).
*/
struct _ProfileInfo
{
char
*name;        /* profile name used as the lookup key */
size_t
length;       /* number of bytes in the raw profile payload */
unsigned char
*info;        /* raw profile payload */
size_t
signature;    /* structure validity signature -- TODO confirm value source */
};
/*
  User data attached to an lcms context so that CMSExceptionHandler() can
  report lcms diagnostics against the image/exception that triggered the
  color transform.
*/
typedef struct _CMSExceptionInfo
{
Image
*image;        /* image being transformed (may be NULL) */
ExceptionInfo
*exception;    /* where lcms warnings are recorded */
} CMSExceptionInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  /*
    Nothing to copy when the source image carries no profiles.
  */
  if (clone_image->profiles == (void *) NULL)
    return(MagickTrue);
  /*
    Replace any existing profile map with a deep clone of the source's.
  */
  if (image->profiles != (void *) NULL)
    DestroyImageProfiles(image);
  image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
    (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  SplayTreeInfo
    *profiles;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  profiles=(SplayTreeInfo *) image->profiles;
  if (profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /*
    Mirror the removal into the 8BIM meta profile before deleting the node.
  */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree(profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyImageProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  /*
    Release the splay-tree holding the image's profiles, if one exists.
  */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image otherwise added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  Per-side (source or target) state for an lcms color transform.
*/
typedef struct _LCMSInfo
{
ColorspaceType
colorspace;    /* ImageMagick colorspace corresponding to the profile */
cmsUInt32Number
type;          /* lcms pixel-format descriptor */
size_t
channels;      /* channels per pixel for this side of the transform */
cmsHPROFILE
profile;       /* opened lcms profile handle */
int
intent;        /* rendering intent */
double
**magick_restrict pixels,  /* per-thread scanline buffers */
scale,         /* scale applied when converting pixel values -- TODO confirm */
translate;     /* offset applied when converting pixel values -- TODO confirm */
} LCMSInfo;
#if LCMS_VERSION < 2060
/*
  Compatibility shims: lcms2 gained per-context APIs in release 2.6.  For
  older releases, emulate them by treating the "context" as the user-data
  pointer itself and by falling back to the global error handler.
*/
static void* cmsGetContextUserData(cmsContext ContextID)
{
  /* pre-2.6: the context handle is the user data itself */
return(ContextID);
}
static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
magick_unreferenced(Plugin);
  /* pre-2.6: store the user data as the context handle */
return((cmsContext) UserData);
}
static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
cmsLogErrorHandlerFunction Fn)
{
magick_unreferenced(ContextID);
  /* pre-2.6: only a single, global error handler is available */
cmsSetLogErrorHandler(Fn);
}
static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
  /* pre-2.6: nothing was allocated, so nothing to free */
magick_unreferenced(ContextID);
}
#endif
/*
  Free the per-thread pixel buffers and the table that holds them; returns
  the (now relinquished) table pointer.
*/
static double **DestroyPixelThreadSet(double **pixels)
{
  register ssize_t
    n;

  if (pixels == (double **) NULL)
    return((double **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (pixels[n] != (double *) NULL)
      pixels[n]=(double *) RelinquishMagickMemory(pixels[n]);
  }
  return((double **) RelinquishMagickMemory(pixels));
}
static double **AcquirePixelThreadSet(const size_t columns,
const size_t channels)
{
double
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(double **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
if (pixels == (double **) NULL)
return((double **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(double *) AcquireQuantumMemory(columns,channels*
sizeof(**pixels));
if (pixels[i] == (double *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
/*
  Dispose of each per-thread lcms color transform, then release the table.
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    n;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (transform[n] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[n]);
  }
  return((cmsHTRANSFORM *) RelinquishMagickMemory(transform));
}
/*
  Create one lcms color transform per worker thread, all sharing the same
  source/target profiles, formats, intent, and flags.  On any failure the
  transforms created so far are destroyed and NULL is returned.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info,
  const LCMSInfo *target_info,const cmsUInt32Number flags,
  cmsContext cms_context)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    n;

  size_t
    thread_count;

  thread_count=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(thread_count,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) memset(transform,0,thread_count*sizeof(*transform));
  for (n=0; n < (ssize_t) thread_count; n++)
  {
    transform[n]=cmsCreateTransformTHR(cms_context,source_info->profile,
      source_info->type,target_info->profile,target_info->type,
      target_info->intent,flags);
    if (transform[n] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
/*
  lcms error callback: surface an lcms diagnostic as an ImageMagick warning
  on the image/exception attached to this color-management context.
*/
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;

  const char
    *text;

  cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context);
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  text=message != (char *) NULL ? message : "no message";
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,text);
  (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
    "UnableToTransformColorspace","`%s', %s (#%u)",image->filename,text,
    severity);
}
#endif
static MagickBooleanType SetsRGBImageProfile(Image *image,
ExceptionInfo *exception)
{
static unsigned char
sRGBProfile[] =
{
0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
};
StringInfo
*profile;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
return(MagickFalse);
profile=AcquireStringInfo(sizeof(sRGBProfile));
SetStringInfoDatum(profile,sRGBProfile);
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
return(status);
}
/*
  ProfileImage() associates, applies, or deletes an image profile.

  A NULL/empty datum deletes all profiles matching `name' (which may be a
  glob pattern).  Non-ICC profiles are simply attached; ICC/ICM profiles are
  additionally *applied*: when the image already carries an ICC profile, the
  pixels are color-transformed from the existing profile to the new one via
  Little CMS, and the new profile replaces the old.
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,ExceptionInfo *exception)
{
/* Scale a Quantum into the [translate, scale+translate] range LCMS expects. */
#define GetLCMSPixel(source_info,pixel) \
  (source_info.scale*QuantumScale*(pixel)+source_info.translate)
#define ProfileImageTag "Profile/Image"
/* Map an LCMS double back to a clamped Quantum. */
#define SetLCMSPixel(target_info,pixel) \
  ClampToQuantum(target_info.scale*QuantumRange*(pixel)+target_info.translate)
/* Release LCMS context/profiles before raising; relies on the locals
   cms_context, source_info and target_info being in scope. */
#define ThrowProfileException(severity,tag,context) \
{ \
  if (cms_context != (cmsContext) NULL) \
    cmsDeleteContext(cms_context); \
  if (source_info.profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_info.profile); \
  if (target_info.profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_info.profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s) whose name matches `name'; restart the
        iterator after each removal since deletion invalidates it.
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add an ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            Incoming profile is identical to the current one: consult the
            EXIF hints and, unless they say otherwise, normalize to the
            built-in sRGB profile.  NOTE(review): value may be NULL here --
            presumably LocaleCompare() tolerates NULL; confirm.
          */
          value=GetImageProperty(image,"exif:ColorSpace",exception);
          (void) value;
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /*
            Still identical after normalization: nothing to do.
          */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (LCMS)",image->filename);
#else
      {
        cmsContext
          cms_context;

        CMSExceptionInfo
          cms_exception;

        LCMSInfo
          source_info,
          target_info;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cms_exception.image=image;
        cms_exception.exception=exception;
        cms_context=cmsCreateContext(NULL,&cms_exception);
        if (cms_context == (cmsContext) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler);
        source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_info.profile == (cmsHPROFILE) NULL)
          {
            cmsDeleteContext(cms_context);
            ThrowBinaryException(ResourceLimitError,
              "ColorspaceColorProfileMismatch",name);
          }
        if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile,exception);
        else
          {
            CacheView
              *image_view;

            cmsColorSpaceSignature
              signature;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags;

            MagickOffsetType
              progress;

            ssize_t
              y;

            /*
              With an existing ICC profile, the new profile becomes the
              transform *target* and the existing one the *source*.
            */
            target_info.profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                target_info.profile=source_info.profile;
                source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
                  GetStringInfoDatum(icc_profile),
                  (cmsUInt32Number) GetStringInfoLength(icc_profile));
                if (source_info.profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            /*
              Configure the LCMS pixel layout, scale and translation for the
              source color space.
            */
            source_info.scale=1.0;
            source_info.translate=0.0;
            source_info.colorspace=sRGBColorspace;
            source_info.channels=3;
            switch (cmsGetColorSpace(source_info.profile))
            {
              case cmsSigCmykData:
              {
                source_info.colorspace=CMYKColorspace;
                source_info.channels=4;
                source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
                source_info.scale=100.0;
                break;
              }
              case cmsSigGrayData:
              {
                source_info.colorspace=GRAYColorspace;
                source_info.channels=1;
                source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
                break;
              }
              case cmsSigLabData:
              {
                source_info.colorspace=LabColorspace;
                source_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
                source_info.scale=100.0;
                source_info.translate=(-0.5);
                break;
              }
              case cmsSigRgbData:
              {
                source_info.colorspace=sRGBColorspace;
                source_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                source_info.colorspace=XYZColorspace;
                source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /*
              The target color space is the source profile's PCS unless an
              explicit target profile overrides it.
            */
            signature=cmsGetPCS(source_info.profile);
            if (target_info.profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_info.profile);
            target_info.scale=1.0;
            target_info.translate=0.0;
            target_info.channels=3;
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_info.colorspace=CMYKColorspace;
                target_info.channels=4;
                target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
                target_info.scale=0.01;
                break;
              }
              case cmsSigGrayData:
              {
                target_info.colorspace=GRAYColorspace;
                target_info.channels=1;
                target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
                break;
              }
              case cmsSigLabData:
              {
                target_info.colorspace=LabColorspace;
                target_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
                target_info.scale=0.01;
                target_info.translate=0.5;
                break;
              }
              case cmsSigRgbData:
              {
                target_info.colorspace=sRGBColorspace;
                target_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                target_info.colorspace=XYZColorspace;
                target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /*
              Map the image's rendering intent onto LCMS intents.
            */
            switch (image->rendering_intent)
            {
              case AbsoluteIntent:
              {
                target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC;
                break;
              }
              case PerceptualIntent:
              {
                target_info.intent=INTENT_PERCEPTUAL;
                break;
              }
              case RelativeIntent:
              {
                target_info.intent=INTENT_RELATIVE_COLORIMETRIC;
                break;
              }
              case SaturationIntent:
              {
                target_info.intent=INTENT_SATURATION;
                break;
              }
              default:
              {
                target_info.intent=INTENT_PERCEPTUAL;
                break;
              }
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            /*
              One transform and one scratch scanline per OpenMP thread.
            */
            transform=AcquireTransformThreadSet(&source_info,&target_info,
              flags,cms_context);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image profiles.
            */
            source_info.pixels=AcquirePixelThreadSet(image->columns,
              source_info.channels);
            target_info.pixels=AcquirePixelThreadSet(image->columns,
              target_info.channels);
            if ((source_info.pixels == (double **) NULL) ||
                (target_info.pixels == (double **) NULL))
              {
                target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
                source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
              {
                target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
                source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_info.profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_info.profile);
                if (target_info.profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_info.profile);
                return(MagickFalse);
              }
            if (target_info.colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_info.colorspace,exception);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static) shared(status) \
              magick_number_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register double
                *p;

              register Quantum
                *magick_restrict q;

              register ssize_t
                x;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              /*
                Pack the scanline into the thread's double buffer.
              */
              p=source_info.pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=GetLCMSPixel(source_info,GetPixelRed(image,q));
                if (source_info.channels > 1)
                  {
                    *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q));
                    *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q));
                  }
                if (source_info.channels > 3)
                  *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q));
                q+=GetPixelChannels(image);
              }
              cmsDoTransform(transform[id],source_info.pixels[id],
                target_info.pixels[id],(unsigned int) image->columns);
              /*
                Unpack the transformed doubles; rewind q to scanline start.
              */
              p=target_info.pixels[id];
              q-=GetPixelChannels(image)*image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (target_info.channels == 1)
                  SetPixelGray(image,SetLCMSPixel(target_info,*p),q);
                else
                  SetPixelRed(image,SetLCMSPixel(target_info,*p),q);
                p++;
                if (target_info.channels > 1)
                  {
                    SetPixelGreen(image,SetLCMSPixel(target_info,*p),q);
                    p++;
                    SetPixelBlue(image,SetLCMSPixel(target_info,*p),q);
                    p++;
                  }
                if (target_info.channels > 3)
                  {
                    SetPixelBlack(image,SetLCMSPixel(target_info,*p),q);
                    p++;
                  }
                q+=GetPixelChannels(image);
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp atomic
#endif
                  progress++;
                  proceed=SetImageProgress(image,ProfileImageTag,progress,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_info.colorspace,exception);
            /*
              Fix image->type to agree with the new colorspace.
            */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  TrueColorType : TrueColorAlphaType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  ColorSeparationType : ColorSeparationAlphaType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  GrayscaleType : GrayscaleAlphaType;
                break;
              }
              default:
                break;
            }
            target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
            source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
            transform=DestroyTransformThreadSet(transform);
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile,exception);
            if (target_info.profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_info.profile);
          }
        (void) cmsCloseProfile(source_info.profile);
        cmsDeleteContext(cms_context);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
/*
  RemoveImageProfile() removes the named profile from the image and hands
  ownership of its StringInfo back to the caller (NULL when absent).
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /*
    Mirror the removal into any 8BIM wrapper, then detach the tree node.
  */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return((StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  ResetImageProfileIterator() rewinds the profile iterator so a subsequent
  GetNextImageProfile() starts from the first profile; no-op when the image
  carries no profiles.
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
/*
  Copy one byte from the resource stream into *quantum; returns the
  advanced read pointer.
*/
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=p[0];
  return(p+1);
}
/*
  Decode a 32-bit big-endian value from the resource stream into *quantum;
  returns the pointer advanced past the four bytes consumed.
*/
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  unsigned int
    value;

  value=((unsigned int) p[0] << 24) | ((unsigned int) p[1] << 16) |
    ((unsigned int) p[2] << 8) | (unsigned int) p[3];
  *quantum=value;
  return(p+4);
}
/*
  Decode a 16-bit big-endian value from the resource stream into *quantum;
  returns the pointer advanced past the two bytes consumed.
*/
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  unsigned short
    value;

  value=(unsigned short) (((unsigned short) p[0] << 8) |
    (unsigned short) p[1]);
  *quantum=value;
  return(p+2);
}
/*
  Encode quantum as four big-endian bytes at p (most significant first).
*/
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  p[0]=(unsigned char) (quantum >> 24);
  p[1]=(unsigned char) (quantum >> 16);
  p[2]=(unsigned char) (quantum >> 8);
  p[3]=(unsigned char) quantum;
}
/*
  WriteTo8BimProfile() keeps the image's "8bim" profile in sync with a named
  profile: the matching 8BIM resource is replaced with `profile', or spliced
  out entirely when `profile' is NULL.  Only icc/iptc/xmp have an 8BIM
  representation; other names are ignored.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /*
    Map the profile name to its Photoshop 8BIM resource id.
  */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  /*
    Walk the resource blocks: "8BIM" signature, 16-bit id, Pascal-style
    name padded to even length, 32-bit payload length, padded payload.
  */
  for (p=datum; p < (datum+length-16); )
  {
    q=p;
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;
    if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        /*
          Found the target resource: rebuild the 8BIM block around it.
          extent is everything that follows the old payload.
        */
        extract_extent=0;
        extent=(datum+length)-(p+count);
        if (profile == (StringInfo *) NULL)
          {
            /*
              NULL profile means remove: keep only the bytes before the
              resource header (q) plus the trailing bytes.
            */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /*
              Replace: copy the header (patching the 32-bit length field,
              which sits 4 bytes before the payload), then the new payload
              padded to an even byte count.
            */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}
/*
  GetProfilesFromResourceBlock() scans a Photoshop 8BIM resource block and
  registers each embedded profile (iptc, icc, exif, xmp) with the image; the
  resolution resource (0x03ed) updates image->resolution/units directly.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  /*
    Resource layout: "8BIM" signature, 16-bit id, Pascal-style name padded
    to even length, 32-bit payload length, payload (padded to even length).
  */
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.  NOTE(review): this case advances p by 16 bytes
          regardless of count (and not at all when count < 10), unlike the
          other cases which advance by count -- confirm the loop recovers
          via the "8BIM" signature check.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail: skipped, not registered as a profile.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        /*
          Unrecognized resource: skip its payload.
        */
        p+=count;
        break;
      }
    }
    /*
      Payloads are padded to an even byte count.
    */
    if ((count & 0x01) != 0)
      p++;
  }
}
#if defined(MAGICKCORE_XML_DELEGATE)
/*
  ValidateXMPProfile() returns MagickTrue when the profile payload parses as
  well-formed XML (errors and warnings suppressed), MagickFalse otherwise.
*/
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  xmlDocPtr
    document;

  /*
    Parse XML profile.
  */
  document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
    GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
    XML_PARSE_NOWARNING);
  if (document == (xmlDocPtr) NULL)
    return(MagickFalse);
  xmlFreeDoc(document);
  return(MagickTrue);
}
#else
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  /*
    NOTE(review): without the XML delegate every XMP profile is reported
    invalid, so SetImageProfileInternal() warns and drops it; confirm this
    is intended rather than accepting profiles unvalidated (MagickTrue).
  */
  return(MagickFalse);
}
#endif
/*
  SetImageProfileInternal() clones `profile' into the image's profile tree
  under the lowercased `name'.  An "8bim" profile is additionally unpacked
  into its embedded sub-profiles; other names are (unless `recursive' is set,
  which breaks the mutual recursion with GetProfilesFromResourceBlock) also
  written back into any existing 8BIM wrapper.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((LocaleCompare(name,"xmp") == 0) &&
      (ValidateXMPProfile(profile) == MagickFalse))
    {
      /*
        Malformed XMP: warn and report success without attaching the profile.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "CorruptImageProfile","`%s'",name);
      return(MagickTrue);
    }
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MagickPathExtent);
  LocaleLower(key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,profile,exception);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,profile);
    }
  return(status);
}
/*
  SetImageProfile() attaches a named profile to the image (public,
  non-recursive entry point; see SetImageProfileInternal()).
*/
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=SetImageProfileInternal(image,name,profile,MagickFalse,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Consume one byte from the buffer, advancing *p and shrinking *length;
  returns EOF once the buffer is exhausted.
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    byte;

  if (*length == 0)
    return(EOF);
  byte=(int) **p;
  (*p)++;
  (*length)--;
  return(byte);
}
/*
  Assemble a 16-bit value from buffer in the requested byte order, then
  reinterpret the bit pattern as signed via the union (avoids an
  implementation-defined narrowing cast).
*/
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    value=(unsigned short) (((unsigned short) buffer[1] << 8) |
      (unsigned short) buffer[0]);
  else
    value=(unsigned short) (((unsigned short) buffer[0] << 8) |
      (unsigned short) buffer[1]);
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}
/*
  Assemble a 32-bit value from buffer in the requested byte order, then
  reinterpret the bit pattern as signed via the union.
*/
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    value=((unsigned int) buffer[3] << 24) | ((unsigned int) buffer[2] << 16) |
      ((unsigned int) buffer[1] << 8) | (unsigned int) buffer[0];
  else
    value=((unsigned int) buffer[0] << 24) | ((unsigned int) buffer[1] << 16) |
      ((unsigned int) buffer[2] << 8) | (unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}
/*
  Bounds-checked big-endian 32-bit read: advances *p and shrinks *length by
  four on success; returns 0 (without advancing) on a short buffer.
*/
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  *p+=4;
  (*length)-=4;
  return(value);
}
/*
  Bounds-checked big-endian 16-bit read: advances *p and shrinks *length by
  two on success; returns 0 (without advancing) on a short buffer.
*/
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  *p+=2;
  (*length)-=2;
  return(value);
}
/*
  Serialize the low 32 bits of value at p in the requested byte order.
*/
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 24);
      buffer[1]=(unsigned char) (value >> 16);
      buffer[2]=(unsigned char) (value >> 8);
      buffer[3]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,4);
}
/*
  Serialize a 16-bit value at p in the requested byte order.
*/
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 8);
      buffer[1]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,2);
}
/*
  Sync8BimProfile() rewrites the resolution resource (id 0x03ED) inside an
  8BIM profile so it matches image->resolution and image->units.  Returns
  MagickFalse when the profile is truncated or malformed.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /*
      Scan byte-by-byte for the "8BIM" signature (0x38 0x42 0x49 0x4D).
    */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    /*
      Resource header: 16-bit id, then a Pascal-style name padded so that
      name-length byte + name occupy an even number of bytes.
    */
    id=ReadProfileMSBShort(&p,&length);
    count=(ssize_t) ReadProfileByte(&p,&length);
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;
    length-=count;
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        /*
          Resolution resource: fixed-point 16.16 DPI values; stored values
          are always pixels-per-inch, so convert from cm when needed.
        */
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}
/*
  SyncExifProfile() locates the TIFF header inside an EXIF profile, walks
  its IFD entries, and rewrites the X/Y resolution (0x011a/0x011b),
  orientation (0x0112), and resolution-unit (0x0128) tags from the image's
  current attributes.  Sub-IFDs (EXIF and interoperability offsets) are
  followed using a bounded directory stack; a splay tree of visited entry
  addresses guards against loops in corrupt profiles.  Returns MagickFalse
  when the profile is malformed, MagickTrue otherwise.
*/
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack  16
#define EXIF_DELIMITER  "\n"
#define EXIF_NUM_FORMATS  12
#define TAG_EXIF_OFFSET  0x8769
#define TAG_INTEROP_OFFSET  0xa005
  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;
    size_t
      entry;
  } DirectoryInfo;
  DirectoryInfo
    directory_stack[MaxDirectoryStack];
  EndianType
    endian;
  size_t
    entry,
    length,
    number_entries;
  SplayTreeInfo
    *exif_resources;
  ssize_t
    id,
    level,
    offset;
  /* Byte width of each EXIF value format code (index 0 is unused). */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
  unsigned char
    *directory,
    *exif;
  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /* No TIFF byte-order mark up front: scan forward for an
         "Exif\0\0" marker and retry from just past it. */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  /* "II" (0x4949) selects little-endian, "MM" (0x4D4D) big-endian. */
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  /* TIFF magic number must be 42. */
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This is the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  /* Record each visited entry address so cyclic links cannot loop. */
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    /* Resume a queued directory, if one is pending on the stack. */
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;
      register unsigned char
        *p,
        *q;
      size_t
        number_bytes;
      ssize_t
        format,
        tag_value;
      /* Each IFD entry is 12 bytes, after the 2-byte entry count. */
      q=(unsigned char *) (directory+2+(12*entry));
      if (q > (exif+length-12))
        break;  /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break;  /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value fits inline in the entry's value field */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue;  /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution; for an 8-byte rational, set denominator to 1. */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution, same layout as XResolution. */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation: write at the stored width (long or short). */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: the stored value is image->units+1. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /* Queue this sub-IFD (and the link to the next IFD, which
             follows the entry table) for later processing. */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}
/*
  SyncImageProfiles() pushes the image's current resolution, units, and
  orientation into any attached 8BIM and EXIF profiles.  Returns
  MagickTrue only when every profile that is present was synchronized
  successfully.
*/
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if ((profile != (StringInfo *) NULL) &&
      (Sync8BimProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if ((profile != (StringInfo *) NULL) &&
      (SyncExifProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  return(status);
}
|
base_serialized.h | #include "callback.h"
#include <omp.h>
/*
  OMPT unit test: run a serialized (one thread, one iteration) parallel
  for loop and compare the emitted OMPT event trace against the FileCheck
  patterns below.
*/
int main()
{
  unsigned int i;
  /* SCHEDULE is assumed to be defined by the test's compile command
     (e.g. -DSCHEDULE=static) -- it is not defined in this file. */
#pragma omp parallel for num_threads(1) schedule(SCHEDULE)
  for (i = 0; i < 1; i++) {
  }
  /* The CHECK lines below are FileCheck directives; they must match the
     runtime's event output exactly and must not be reworded. */
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame=0x{{[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=1, parallel_function=0x{{[0-f]+}}, invoker={{.+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  return 0;
}
|
GB_unaryop__abs_int64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int64_int64
// op(A') function: GB_tran__abs_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__abs_int64_int64: Cx [p] = GB_IABS ((int64_t) Ax [p]) for all
// p in 0..anz-1, via the GB_CAST_OP macro defined earlier in this file.
GrB_Info GB_unop__abs_int64_int64
(
    int64_t *restrict Cx,           // output array, anz entries
    const int64_t *restrict Ax,     // input array, anz entries
    int64_t anz,                    // number of entries in Ax and Cx
    int nthreads                    // number of OpenMP threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Each entry is independent, so a static schedule is sufficient.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__abs_int64_int64: C = op (cast (A')): transpose A, cast each
// entry to int64_t, and apply GB_IABS.  The actual loop lives in the
// included template, specialized by the GB_* macros above.
GrB_Info GB_tran__abs_int64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // presumably per-slice row counts -- see template
    GBI_single_iterator Iter,       // iterator over A (semantics in template)
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Phase 2 of the two-phase transpose template.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
core_ztslqt.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include "core_blas.h"
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
#undef REAL
#define COMPLEX
/***************************************************************************//**
*
* @ingroup core_tslqt
*
* Computes an LQ factorization of a rectangular matrix
* formed by coupling side-by-side a complex m-by-m
* lower triangular tile A1 and a complex m-by-n tile A2:
*
* | A1 A2 | = L * Q
*
* The tile Q is represented as a product of elementary reflectors
*
* Q = H(k)^H . . . H(2)^H H(1)^H, where k = min(m,n).
*
* Each H(i) has the form
*
* H(i) = I - tau * v * v^H
*
* where tau is a complex scalar, and v is a complex vector with
* v(1:i-1) = 0 and v(i) = 1; v(i+1:n)^H is stored on exit in
* A2(i,1:n), and tau in tau(i).
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the tile A1 and A2. m >= 0.
* The number of columns of the tile A1.
*
* @param[in] n
* The number of columns of the tile A2. n >= 0.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the m-by-m tile A1.
* On exit, the elements on and below the diagonal of the array
* contain the m-by-m lower trapezoidal tile L;
* the elements above the diagonal are not referenced.
*
* @param[in] lda1
* The leading dimension of the array A1. lda1 >= max(1,m).
*
* @param[in,out] A2
* On entry, the m-by-n tile A2.
* On exit, all the elements with the array tau, represent
* the unitary tile Q as a product of elementary reflectors
* (see Further Details).
*
* @param[in] lda2
* The leading dimension of the tile A2. lda2 >= max(1,m).
*
* @param[out] T
* The ib-by-m triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param tau
* Auxiliarry workspace array of length m.
*
* @param work
* Auxiliary workspace array of length ib*m.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
int core_ztslqt(int m, int n, int ib,
                plasma_complex64_t *A1, int lda1,
                plasma_complex64_t *A2, int lda2,
                plasma_complex64_t *T, int ldt,
                plasma_complex64_t *tau,
                plasma_complex64_t *work)
{
    // Check input arguments.
    if (m < 0) {
        coreblas_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        coreblas_error("illegal value of n");
        return -2;
    }
    if (ib < 0) {
        coreblas_error("illegal value of ib");
        return -3;
    }
    if (A1 == NULL) {
        coreblas_error("NULL A1");
        return -4;
    }
    if (lda1 < imax(1, m) && m > 0) {
        coreblas_error("illegal value of lda1");
        return -5;
    }
    if (A2 == NULL) {
        coreblas_error("NULL A2");
        return -6;
    }
    if (lda2 < imax(1, m) && m > 0) {
        coreblas_error("illegal value of lda2");
        return -7;
    }
    if (T == NULL) {
        coreblas_error("NULL T");
        return -8;
    }
    if (ldt < imax(1, ib) && ib > 0) {
        coreblas_error("illegal value of ldt");
        return -9;
    }
    if (tau == NULL) {
        coreblas_error("NULL tau");
        return -10;
    }
    if (work == NULL) {
        coreblas_error("NULL work");
        return -11;
    }
    // quick return
    if (m == 0 || n == 0 || ib == 0)
        return PlasmaSuccess;
    static plasma_complex64_t zone  = 1.0;
    static plasma_complex64_t zzero = 0.0;
    // Process the m rows in panels of at most ib reflectors each.
    for (int ii = 0; ii < m; ii += ib) {
        int sb = imin(m-ii, ib);
        for (int i = 0; i < sb; i++) {
            // Generate elementary reflector H(ii*ib+i) to annihilate
            // A(ii*ib+i,ii*ib+i:n).
#ifdef COMPLEX
            // LQ operates on rows: conjugate the row so zlarfg (which is
            // written for columns) produces the correct reflector.
            LAPACKE_zlacgv_work(n, &A2[ii+i], lda2);
            LAPACKE_zlacgv_work(1, &A1[lda1*(ii+i)+ii+i], lda1);
#endif
            LAPACKE_zlarfg_work(n+1, &A1[lda1*(ii+i)+ii+i], &A2[ii+i], lda2,
                                &tau[ii+i]);
            plasma_complex64_t alpha = -(tau[ii+i]);
            if (ii+i+1 < m) {
                // Apply H(ii+i-1) to A(ii+i:ii+ib-1, ii+i-1:n) from the right.
                // work := A1 column segment + A2 * v  (accumulated below).
                cblas_zcopy(sb-i-1,
                            &A1[lda1*(ii+i)+(ii+i+1)], 1,
                            work, 1);
                cblas_zgemv(CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans,
                            sb-i-1, n,
                            CBLAS_SADDR(zone), &A2[ii+i+1], lda2,
                                               &A2[ii+i], lda2,
                            CBLAS_SADDR(zone), work, 1);
                // Rank-1 update of the remaining rows of A1 and A2.
                cblas_zaxpy(sb-i-1, CBLAS_SADDR(alpha), work, 1,
                            &A1[lda1*(ii+i)+ii+i+1], 1);
                cblas_zgerc(CblasColMajor,
                            sb-i-1, n,
                            CBLAS_SADDR(alpha), work, 1,
                                                &A2[ii+i], lda2,
                            &A2[ii+i+1], lda2);
            }
            // Calculate T.
            // Column i of the panel's T factor: T(0:i-1,i) = -tau * A2' * v,
            // then multiply by the already-formed upper triangle of T.
            cblas_zgemv(CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans,
                        i, n,
                        CBLAS_SADDR(alpha),  &A2[ii], lda2,
                                             &A2[ii+i], lda2,
                        CBLAS_SADDR(zzero), &T[ldt*(ii+i)], 1);
#ifdef COMPLEX
            // Undo the conjugation applied before zlarfg.
            LAPACKE_zlacgv_work(n, &A2[ii+i], lda2);
            LAPACKE_zlacgv_work(1, &A1[lda1*(ii+i)+ii+i], lda1);
#endif
            cblas_ztrmv(
                CblasColMajor,
                (CBLAS_UPLO)PlasmaUpper,
                (CBLAS_TRANSPOSE)PlasmaNoTrans, (CBLAS_DIAG)PlasmaNonUnit,
                i,
                &T[ldt*ii], ldt,
                &T[ldt*(ii+i)], 1);
            T[ldt*(ii+i)+i] = tau[ii+i];
        }
        // Apply the panel's block reflector to the trailing rows of A1/A2.
        if (m > ii+sb) {
            core_ztsmlq(PlasmaRight, Plasma_ConjTrans,
                        m-(ii+sb), sb, m-(ii+sb), n, ib, ib,
                        &A1[lda1*ii+ii+sb], lda1,
                        &A2[ii+sb], lda2,
                        &A2[ii], lda2,
                        &T[ldt*ii], ldt,
                        work, lda1);
        }
    }
    return PlasmaSuccess;
}
/******************************************************************************/
/*
  OpenMP task wrapper around core_ztslqt(): registers data dependencies
  on A1, A2, and T, then runs the sequential kernel inside the task using
  the caller-provided per-thread workspace.
*/
void core_omp_ztslqt(int m, int n, int ib,
                     plasma_complex64_t *A1, int lda1,
                     plasma_complex64_t *A2, int lda2,
                     plasma_complex64_t *T, int ldt,
                     plasma_workspace_t work,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    // T should be m-by-ib, but is stored as ib-by-m.
    #pragma omp task depend(inout:A1[0:lda1*m]) \
                     depend(inout:A2[0:lda2*n]) \
                     depend(out:T[0:ib*m])
    {
        if (sequence->status == PlasmaSuccess) {
            // Per-thread scratch: the first m entries serve as tau,
            // the remainder as the kernel's work array.
            int rank = omp_get_thread_num();
            plasma_complex64_t *scratch =
                (plasma_complex64_t*)work.spaces[rank];
            plasma_complex64_t *tau_vec = scratch;
            plasma_complex64_t *kernel_work = scratch + m;
            // Run the sequential tile kernel.
            int rc = core_ztslqt(m, n, ib,
                                 A1, lda1,
                                 A2, lda2,
                                 T, ldt,
                                 tau_vec,
                                 kernel_work);
            if (rc != PlasmaSuccess) {
                plasma_error("core_ztslqt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
bt_single.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - BT
This benchmark is an OpenMP C version of the NPB BT code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: R. Van der Wijngaart
T. Harris
M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
//#include "npb-C.h"
/*
NAS Parallel Benchmarks 2.3 OpenMP C Versions
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
typedef int boolean;
typedef struct { double real; double imag; } dcomplex;
#define TRUE 1
#define FALSE 0
#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))
#define pow2(a) ((a)*(a))
#define get_real(c) c.real
#define get_imag(c) c.imag
#define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag)
#define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag)
#define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \
c.imag = a.real * b.imag + a.imag * b.real)
#define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b)
extern double randlc(double *, double);
extern void vranlc(int, double *, double, double *);
extern void timer_clear(int);
extern void timer_start(int);
extern void timer_stop(int);
extern double timer_read(int);
extern void c_print_results(char *name, char cclass, int n1, int n2,
int n3, int niter, int nthreads, double t,
double mops, char *optype, int passed_verification,
char *npbversion, char *compiletime, char *cc,
char *clink, char *c_lib, char *c_inc,
char *cflags, char *clinkflags, char *rand);
/* global variables */
//#include "header.h"
/*--------------------------------------------------------------------
c---------------------------------------------------------------------
c
c header.h
c
c---------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c The following include file is generated automatically by the
c "setparams" utility. It defines
c maxcells: the square root of the maximum number of processors
c problem_size: 12, 64, 102, 162 (for class T, A, B, C)
c dt_default: default time step for this problem size if no
c config file
c niter_default: default number of iterations for this problem size
--------------------------------------------------------------------*/
//#include "npbparams.h"
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'A'
#endif
#if CLASS == 'S'
#define PROBLEM_SIZE 12
#define NITER_DEFAULT 60
#define DT_DEFAULT 0.010
#endif
#if CLASS == 'W'
#define PROBLEM_SIZE 24
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0008
#endif
#if CLASS == 'A'
#define PROBLEM_SIZE 64
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0008
#endif
#if CLASS == 'B'
#define PROBLEM_SIZE 102
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0003
#endif
#if CLASS == 'C'
#define PROBLEM_SIZE 162
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0001
#endif
#define CONVERTDOUBLE FALSE
#define COMPILETIME "27 Oct 2014"
#define NPBVERSION "2.3"
#define CS1 "gcc"
#define CS2 "$(CC)"
#define CS3 "(none)"
#define CS4 "-I../common"
#define CS5 "-fopenmp -O2"
#define CS6 "-lm -fopenmp"
#define CS7 "randdp"
//--------end class definition -----------
#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5
/* COMMON block: global */
static int grid_points[3]; /* grid_ponts(1:3) */
/* COMMON block: constants */
static double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3;
static double dx1, dx2, dx3, dx4, dx5;
static double dy1, dy2, dy3, dy4, dy5;
static double dz1, dz2, dz3, dz4, dz5;
static double dssp, dt;
static double ce[5][13]; /* ce(5,13) */
static double dxmax, dymax, dzmax;
static double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5;
static double dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1;
static double yycon1, yycon2, yycon3, yycon4, yycon5;
static double dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1;
static double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5;
static double dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1;
static double dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345;
static double conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp;
static double dttx1, dttx2, dtty1, dtty2, dttz1, dttz2;
static double c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6;
static double c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
/*
c to improve cache performance, grid dimensions padded by 1
c for even number sizes only.
*/
/* COMMON block: fields */
static double us[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double vs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double ws[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double qs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double rho_i[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double square[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double forcing[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1];
static double u[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5];
static double rhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5];
static double lhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5];
/* COMMON block: work_1d */
static double cuf[PROBLEM_SIZE];
static double q[PROBLEM_SIZE];
static double ue[PROBLEM_SIZE][5];
static double buf[PROBLEM_SIZE][5];
//Liao, the program may be wrong!!
#pragma omp threadprivate(cuf, q, ue, buf)
/*
c to improve cache performance, grid dimensions (first two for these
c to arrays) padded by 1 for even number sizes only.
*/
/* COMMON block: work_lhs */
static double fjac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* fjac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double njac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* njac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double tmp1, tmp2, tmp3;
/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void compute_rhs(void);
static void set_constants(void);
static void verify(int no_time_steps, char *cclass, boolean *verified);
static void x_solve(void);
static void x_backsubstitute(void);
static void x_solve_cell(void);
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5],
double cblock[5][5]);
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
static void binvrhs(double lhs[5][5], double r[5]);
static void y_solve(void);
static void y_backsubstitute(void);
static void y_solve_cell(void);
static void z_solve(void);
static void z_backsubstitute(void);
static void z_solve_cell(void);
/*--------------------------------------------------------------------
program BT
c-------------------------------------------------------------------*/
int main(int argc, char **argv) {
int niter, step, n3;
int nthreads = 1;
double navg, mflops;
double tmax;
boolean verified;
char cclass;
FILE *fp;
/*--------------------------------------------------------------------
c Root node reads input file (if it exists) else takes
c defaults from parameters
c-------------------------------------------------------------------*/
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - BT Benchmark\n\n");
fp = fopen("inputbt.data", "r");
if (fp != NULL) {
printf(" Reading from input file inputbt.data");
fscanf(fp, "%d", &niter);
while (fgetc(fp) != '\n');
fscanf(fp, "%lg", &dt);
while (fgetc(fp) != '\n');
fscanf(fp, "%d%d%d",
&grid_points[0], &grid_points[1], &grid_points[2]);
fclose(fp);
} else {
printf(" No input file inputbt.data. Using compiled defaults\n");
niter = NITER_DEFAULT;
dt = DT_DEFAULT;
grid_points[0] = PROBLEM_SIZE;
grid_points[1] = PROBLEM_SIZE;
grid_points[2] = PROBLEM_SIZE;
}
printf(" Size: %3dx%3dx%3d\n",
grid_points[0], grid_points[1], grid_points[2]);
printf(" Iterations: %3d dt: %10.6f\n", niter, dt);
if (grid_points[0] > IMAX ||
grid_points[1] > JMAX ||
grid_points[2] > KMAX) {
printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]);
printf(" Problem size too big for compiled array sizes\n");
exit(1);
}
set_constants();
#pragma omp parallel
{
initialize();
lhsinit();
exact_rhs();
/*--------------------------------------------------------------------
c do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
adi();
initialize();
} /* end parallel */
timer_clear(1);
timer_start(1);
#pragma omp parallel firstprivate(niter) private(step)
{
for (step = 1; step <= niter; step++) {
if (step%20 == 0 || step == 1) {
#pragma omp master
printf(" Time step %4d\n", step);
}
adi();
}
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */
timer_stop(1);
tmax = timer_read(1);
verify(niter, &cclass, &verified);
n3 = grid_points[0]*grid_points[1]*grid_points[2];
navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0;
if ( fabs(tmax-0.0)>1.0e-5 ) {
mflops = 1.0e-6*(double)niter*
(3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax;
} else {
mflops = 0.0;
}
c_print_results("BT", cclass, grid_points[0],
grid_points[1], grid_points[2], niter, nthreads,
tmax, mflops, " floating point",
verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5,
CS6, "(none)");
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void add(void) {
/*--------------------------------------------------------------------
c  u := u + rhs at every interior grid point (orphaned omp for:
c  called from inside an enclosing parallel region).
c-------------------------------------------------------------------*/
  int ii, jj, kk, mm;
#pragma omp for private(jj,kk,mm)
  for (ii = 1; ii < grid_points[0]-1; ii++)
    for (jj = 1; jj < grid_points[1]-1; jj++)
      for (kk = 1; kk < grid_points[2]-1; kk++)
        for (mm = 0; mm < 5; mm++)
          u[ii][jj][kk][mm] += rhs[ii][jj][kk][mm];
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void adi(void) {
  /* One ADI time step: build the right-hand side, sweep the three
     coordinate directions in turn, then fold the update into u.
     The call order is significant -- each phase consumes the
     previous phase's output. */
  compute_rhs();
  x_solve();
  y_solve();
  z_solve();
  add();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error_norm(double rms[5]) {
/*--------------------------------------------------------------------
c  Per-component RMS norm of (computed solution u - exact solution),
c  taken over the whole grid and normalized by the interior extents.
c-------------------------------------------------------------------*/
  int i, j, k, m, d;
  double xi, eta, zeta, u_exact[5], diff;

  for (m = 0; m < 5; m++)
    rms[m] = 0.0;

  for (i = 0; i < grid_points[0]; i++) {
    xi = dnxm1 * (double)i;
    for (j = 0; j < grid_points[1]; j++) {
      eta = dnym1 * (double)j;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = dnzm1 * (double)k;
        exact_solution(xi, eta, zeta, u_exact);
        for (m = 0; m < 5; m++) {
          diff = u[i][j][k][m] - u_exact[m];
          rms[m] += diff * diff;
        }
      }
    }
  }

  /* Normalize by the interior point count in each direction. */
  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++)
      rms[m] /= (double)(grid_points[d] - 2);
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs_norm(double rms[5]) {
/*--------------------------------------------------------------------
c  Per-component RMS norm of the right-hand-side vector over the
c  interior grid points.
c-------------------------------------------------------------------*/
  int i, j, k, d, m;
  double r;

  for (m = 0; m < 5; m++)
    rms[m] = 0.0;

  for (i = 1; i < grid_points[0]-1; i++)
    for (j = 1; j < grid_points[1]-1; j++)
      for (k = 1; k < grid_points[2]-1; k++)
        for (m = 0; m < 5; m++) {
          r = rhs[i][j][k][m];
          rms[m] += r * r;
        }

  /* Normalize by the interior point count in each direction. */
  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++)
      rms[m] /= (double)(grid_points[d]-2);
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_rhs(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute the right hand side based on exact solution
c-------------------------------------------------------------------*/
double dtemp[5], xi, eta, zeta, dtpp;
int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
/*--------------------------------------------------------------------
c initialize
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
forcing[i][j][k][m] = 0.0;
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for private(k,i,m)
for (j = 1; j < grid_points[1]-1; j++) {
eta = (double)j * dnym1;
for (k = 1; k < grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[i][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
for (m = 1; m <= 4; m++) {
buf[i][m] = dtpp * dtemp[m];
}
cuf[i] = buf[i][1] * buf[i][1];
buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] +
buf[i][3] * buf[i][3];
q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
buf[i][3]*ue[i][3]);
}
for (i = 1; i < grid_points[0]-1; i++) {
im1 = i-1;
ip1 = i+1;
forcing[i][j][k][0] = forcing[i][j][k][0] -
tx2*(ue[ip1][1]-ue[im1][1])+
dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] -
tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
(ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] -
tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] -
tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] -
tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
i = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
i = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(-4.0*ue[i-1][m] + 6.0*ue[i][m] -
4.0*ue[i+1][m] + ue[i+2][m]);
}
for (m = 0; m < 5; m++) {
for (i = 1*3; i <= grid_points[0]-3*1-1; i++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
(ue[i-2][m] - 4.0*ue[i-1][m] +
6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
}
}
for (m = 0; m < 5; m++) {
i = grid_points[0]-3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[i-2][m] - 4.0*ue[i-1][m] +
6.0*ue[i][m] - 4.0*ue[i+1][m]);
i = grid_points[0]-2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for private(k,j,m)
for (i = 1; i < grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
for (k = 1; k < grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[j][m] = dtemp[m];
}
dtpp = 1.0/dtemp[0];
for (m = 1; m <= 4; m++) {
buf[j][m] = dtpp * dtemp[m];
}
cuf[j] = buf[j][2] * buf[j][2];
buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] +
buf[j][3] * buf[j][3];
q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
buf[j][3]*ue[j][3]);
}
for (j = 1; j < grid_points[1]-1; j++) {
jm1 = j-1;
jp1 = j+1;
forcing[i][j][k][0] = forcing[i][j][k][0] -
ty2*( ue[jp1][2]-ue[jm1][2] )+
dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] -
ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] -
ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
(ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] -
ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] -
ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+
buf[jm1][0])+
yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
j = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
j = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(-4.0*ue[j-1][m] + 6.0*ue[j][m] -
4.0*ue[j+1][m] + ue[j+2][m]);
}
for (m = 0; m < 5; m++) {
for (j = 1*3; j <= grid_points[1]-3*1-1; j++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
(ue[j-2][m] - 4.0*ue[j-1][m] +
6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
}
}
for (m = 0; m < 5; m++) {
j = grid_points[1]-3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[j-2][m] - 4.0*ue[j-1][m] +
6.0*ue[j][m] - 4.0*ue[j+1][m]);
j = grid_points[1]-2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
for (i = 1; i < grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
for (j = 1; j < grid_points[1]-1; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[k][m] = dtemp[m];
}
dtpp = 1.0/dtemp[0];
for (m = 1; m <= 4; m++) {
buf[k][m] = dtpp * dtemp[m];
}
cuf[k] = buf[k][3] * buf[k][3];
buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] +
buf[k][2] * buf[k][2];
q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
buf[k][3]*ue[k][3]);
}
for (k = 1; k < grid_points[2]-1; k++) {
km1 = k-1;
kp1 = k+1;
forcing[i][j][k][0] = forcing[i][j][k][0] -
tz2*( ue[kp1][3]-ue[km1][3] )+
dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] -
tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] -
tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] -
tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
(ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] -
tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]
+buf[km1][0])+
zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
k = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
k = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(-4.0*ue[k-1][m] + 6.0*ue[k][m] -
4.0*ue[k+1][m] + ue[k+2][m]);
}
for (m = 0; m < 5; m++) {
for (k = 1*3; k <= grid_points[2]-3*1-1; k++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
(ue[k-2][m] - 4.0*ue[k-1][m] +
6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
}
}
for (m = 0; m < 5; m++) {
k = grid_points[2]-3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[k-2][m] - 4.0*ue[k-1][m] +
6.0*ue[k][m] - 4.0*ue[k+1][m]);
k = grid_points[2]-2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
}
}
}
/*--------------------------------------------------------------------
c now change the sign of the forcing function,
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Evaluate the exact (manufactured) solution at the point
c (xi, eta, zeta). For each of the 5 flow variables the solution is
c the constant term ce[m][0] plus one cubic polynomial per
c coordinate, with coefficients for coordinate d stored in columns
c 1+d, 4+d, 7+d and 10+d of the global table ce. The accumulation
c order (constant, then xi-, eta-, zeta-polynomial) matches the
c original single-expression form, so the floating-point result is
c bit-identical.
c-------------------------------------------------------------------*/
const double coord[3] = { xi, eta, zeta };
int m, d;
for (m = 0; m < 5; m++) {
double acc = ce[m][0];
for (d = 0; d < 3; d++) {
const double t = coord[d];
/* Horner evaluation of the cubic in coordinate d */
acc += t*(ce[m][1+d] + t*(ce[m][4+d] + t*(ce[m][7+d] +
t*ce[m][10+d])));
}
dtemp[m] = acc;
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void initialize(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This subroutine initializes the field variable u using
c tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/
/* NOTE(review): every loop below uses an orphaned `#pragma omp for`,
   so this function is presumably called from inside an enclosing
   `#pragma omp parallel` region -- confirm at the call site. The
   function-scope locals (i, j, k, xi, ...) are automatically private
   to each thread because they live on each thread's stack. */
int i, j, k, m, ix, iy, iz;
double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];
/*--------------------------------------------------------------------
c Later (in compute_rhs) we compute 1/u for every element. A few of
c the corner elements are not used, but it convenient (and faster)
c to compute the whole thing with a simple loop. Make sure those
c values are nonzero by initializing the whole thing here.
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
for (i = 0; i < IMAX; i++) {
for (j = 0; j < IMAX; j++) {
for (k = 0; k < IMAX; k++) {
for (m = 0; m < 5; m++) {
u[i][j][k][m] = 1.0;
}
}
}
}
/*--------------------------------------------------------------------
c first store the "interpolated" values everywhere on the grid
c-------------------------------------------------------------------*/
/* Pface[s][d][m]: exact solution on face s (0 or 1) of direction d
   (0=xi, 1=eta, 2=zeta) for variable m. The interior value is the
   standard trilinear transfinite-interpolation blend of the six
   face values. */
#pragma omp for private(j,k,ix,iy,iz,m)
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
for (ix = 0; ix < 2; ix++) {
exact_solution((double)ix, eta, zeta,
&(Pface[ix][0][0]));
}
for (iy = 0; iy < 2; iy++) {
exact_solution(xi, (double)iy , zeta,
&Pface[iy][1][0]);
}
for (iz = 0; iz < 2; iz++) {
exact_solution(xi, eta, (double)iz,
&Pface[iz][2][0]);
}
for (m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] +
(1.0-xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] +
(1.0-eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] +
(1.0-zeta) * Pface[0][2][m];
/* transfinite interpolation: inclusion-exclusion blend of
   the three 1-D interpolants */
u[i][j][k][m] = Pxi + Peta + Pzeta -
Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
Pxi*Peta*Pzeta;
}
}
}
}
/*--------------------------------------------------------------------
c now store the exact values on the boundaries
c-------------------------------------------------------------------*/
/* The six face loops below overwrite the interpolated values on the
   boundary planes with the exact solution. Each `nowait` lets the
   following loop of the pair start early: the two loops of each pair
   write disjoint planes (i=0 vs i=max, etc.), and the second loop of
   each pair keeps its implicit barrier before the next pair begins. */
/*--------------------------------------------------------------------
c west face
c-------------------------------------------------------------------*/
i = 0;
xi = 0.0;
#pragma omp for private(k,m) nowait
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c east face
c-------------------------------------------------------------------*/
i = grid_points[0]-1;
xi = 1.0;
#pragma omp for private(k,m)
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c south face
c-------------------------------------------------------------------*/
j = 0;
eta = 0.0;
#pragma omp for private(k,m) nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c north face
c-------------------------------------------------------------------*/
j = grid_points[1]-1;
eta = 1.0;
#pragma omp for private(k,m)
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c bottom face
c-------------------------------------------------------------------*/
k = 0;
zeta = 0.0;
#pragma omp for private(j,m) nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i *dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c top face
c-------------------------------------------------------------------*/
k = grid_points[2]-1;
zeta = 1.0;
#pragma omp for private(j,m)
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {
/*--------------------------------------------------------------------
c Initialize the left hand side array: zero every 5x5 block of the
c three block diagonals (sub=0, main=1, super=2) and set the main
c (middle) block to the 5x5 identity matrix.
c
c The original code made two full sweeps over the array -- zero
c everything, then set the main-block diagonal to 1 -- and noted
c itself that this was "overkill". This single sweep writes every
c element exactly once and produces an identical final state, while
c halving the memory traffic and removing one worksharing barrier.
c
c NOTE(review): uses an orphaned `#pragma omp for` -- presumably
c invoked from inside an enclosing parallel region; confirm at the
c call site.
c-------------------------------------------------------------------*/
int i, j, k, m, n;
#pragma omp for private(j,k,m,n)
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
for (n = 0; n < 5; n++) {
lhs[i][j][k][0][m][n] = 0.0;
/* middle block is the identity; off-diagonal entries stay zero */
lhs[i][j][k][1][m][n] = (m == n) ? 1.0 : 0.0;
lhs[i][j][k][2][m][n] = 0.0;
}
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side in the xi-direction
c-------------------------------------------------------------------*/
/* Builds the block-tridiagonal system for the x (xi) sweep:
   fjac holds the 5x5 flux Jacobian dF/dU and njac the 5x5 viscous
   Jacobian, evaluated from the conserved variables u at every point
   of the full i range (boundaries included, since interior LHS
   blocks reference i-1 and i+1). Then lhs[..][AA], lhs[..][BB],
   lhs[..][CC] receive the sub-, main- and super-diagonal blocks of
   (I + dt*(...)).
   NOTE(review): tmp1/tmp2/tmp3 are file-scope temporaries written
   inside a worksharing loop -- presumably declared threadprivate
   elsewhere in the file; confirm, otherwise this would race. */
int i, j, k;
/*--------------------------------------------------------------------
c determine a (labeled f) and n jacobians
c-------------------------------------------------------------------*/
/* One orphaned `omp for` over j covers BOTH phases below (Jacobian
   evaluation and LHS assembly); that is safe because each (j,k)
   iteration only reads the fjac/njac rows it just computed. */
#pragma omp for private(k,i)
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (i = 0; i < grid_points[0]; i++) {
/* tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
fjac[ i][ j][ k][0][0] = 0.0;
fjac[ i][ j][ k][0][1] = 1.0;
fjac[ i][ j][ k][0][2] = 0.0;
fjac[ i][ j][ k][0][3] = 0.0;
fjac[ i][ j][ k][0][4] = 0.0;
fjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 *
u[i][j][k][1])
+ c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2;
fjac[i][j][k][1][1] = ( 2.0 - c2 )
* ( u[i][j][k][1] / u[i][j][k][0] );
fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );
fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );
fjac[i][j][k][1][4] = c2;
fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][2][3] = 0.0;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][2] = 0.0;
fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2
- c1 * ( u[i][j][k][4] * tmp1 ) )
* ( u[i][j][k][1] * tmp1 );
fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1
- 0.50 * c2
* ( 3.0*u[i][j][k][1]*u[i][j][k][1]
+ u[i][j][k][2]*u[i][j][k][2]
+ u[i][j][k][3]*u[i][j][k][3] ) * tmp2;
fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] )
* tmp2;
fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] )
* tmp2;
fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );
/* viscous Jacobian; con43 = 4/3 multiplies the normal-direction
   momentum entry */
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = con43 * c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 ) * tmp1;
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in x direction
c-------------------------------------------------------------------*/
/* AA = block coupling to i-1, BB = diagonal block (identity plus
   2*dt terms), CC = block coupling to i+1; dx1..dx5 add artificial
   diagonal dissipation per equation. */
for (i = 1; i < grid_points[0]-1; i++) {
tmp1 = dt * tx1;
tmp2 = dt * tx2;
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0]
- tmp1 * njac[i-1][j][k][0][0]
- tmp1 * dx1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1]
- tmp1 * njac[i-1][j][k][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2]
- tmp1 * njac[i-1][j][k][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3]
- tmp1 * njac[i-1][j][k][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4]
- tmp1 * njac[i-1][j][k][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0]
- tmp1 * njac[i-1][j][k][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1]
- tmp1 * njac[i-1][j][k][1][1]
- tmp1 * dx2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2]
- tmp1 * njac[i-1][j][k][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3]
- tmp1 * njac[i-1][j][k][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4]
- tmp1 * njac[i-1][j][k][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0]
- tmp1 * njac[i-1][j][k][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1]
- tmp1 * njac[i-1][j][k][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2]
- tmp1 * njac[i-1][j][k][2][2]
- tmp1 * dx3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3]
- tmp1 * njac[i-1][j][k][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4]
- tmp1 * njac[i-1][j][k][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0]
- tmp1 * njac[i-1][j][k][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1]
- tmp1 * njac[i-1][j][k][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2]
- tmp1 * njac[i-1][j][k][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3]
- tmp1 * njac[i-1][j][k][3][3]
- tmp1 * dx4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4]
- tmp1 * njac[i-1][j][k][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0]
- tmp1 * njac[i-1][j][k][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1]
- tmp1 * njac[i-1][j][k][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2]
- tmp1 * njac[i-1][j][k][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3]
- tmp1 * njac[i-1][j][k][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4]
- tmp1 * njac[i-1][j][k][4][4]
- tmp1 * dx5;
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dx1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dx2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dx3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dx4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dx5;
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0]
- tmp1 * njac[i+1][j][k][0][0]
- tmp1 * dx1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1]
- tmp1 * njac[i+1][j][k][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2]
- tmp1 * njac[i+1][j][k][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3]
- tmp1 * njac[i+1][j][k][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4]
- tmp1 * njac[i+1][j][k][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0]
- tmp1 * njac[i+1][j][k][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1]
- tmp1 * njac[i+1][j][k][1][1]
- tmp1 * dx2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2]
- tmp1 * njac[i+1][j][k][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3]
- tmp1 * njac[i+1][j][k][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4]
- tmp1 * njac[i+1][j][k][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0]
- tmp1 * njac[i+1][j][k][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1]
- tmp1 * njac[i+1][j][k][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2]
- tmp1 * njac[i+1][j][k][2][2]
- tmp1 * dx3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3]
- tmp1 * njac[i+1][j][k][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4]
- tmp1 * njac[i+1][j][k][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0]
- tmp1 * njac[i+1][j][k][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1]
- tmp1 * njac[i+1][j][k][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2]
- tmp1 * njac[i+1][j][k][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3]
- tmp1 * njac[i+1][j][k][3][3]
- tmp1 * dx4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4]
- tmp1 * njac[i+1][j][k][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0]
- tmp1 * njac[i+1][j][k][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1]
- tmp1 * njac[i+1][j][k][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2]
- tmp1 * njac[i+1][j][k][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3]
- tmp1 * njac[i+1][j][k][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4]
- tmp1 * njac[i+1][j][k][4][4]
- tmp1 * dx5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/
/* Builds the block-tridiagonal system for the y (eta) sweep, in the
   same shape as lhsx but with the y-momentum component (index 2)
   playing the role of the sweep-normal velocity. Unlike lhsx, the
   Jacobian evaluation and LHS assembly here are two separate
   orphaned `omp for` loops; the implicit barrier after the first
   loop is required because assembly reads fjac/njac at j-1 and j+1,
   which may have been computed by a different thread.
   NOTE(review): tmp1/tmp2/tmp3 are file-scope temporaries written
   inside worksharing loops -- presumably declared threadprivate
   elsewhere in the file; confirm. */
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the tri-diagonal matrix;
c determine a (labeled f) and n jacobians for cell c
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[ i][ j][ k][0][0] = 0.0;
fjac[ i][ j][ k][0][1] = 0.0;
fjac[ i][ j][ k][0][2] = 1.0;
fjac[ i][ j][ k][0][3] = 0.0;
fjac[ i][ j][ k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][3] = 0.0;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][2][2] = ( 2.0 - c2 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;
fjac[i][j][k][2][4] = c2;
fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][3][1] = 0.0;
fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * u[i][j][k][4] * tmp1 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2]
* tmp2;
fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ 3.0 * u[i][j][k][2]*u[i][j][k][2]
+ u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;
/* viscous Jacobian; con43 = 4/3 multiplies the entry for the
   sweep-normal (y) momentum component */
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = con43 * c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 ) * tmp1;
}
}
}
/*--------------------------------------------------------------------
c now joacobians set, so form left hand side in y direction
c-------------------------------------------------------------------*/
/* AA = block coupling to j-1, BB = diagonal block (identity plus
   2*dt terms), CC = block coupling to j+1; dy1..dy5 add artificial
   diagonal dissipation per equation. */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
tmp1 = dt * ty1;
tmp2 = dt * ty2;
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0]
- tmp1 * njac[i][j-1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1]
- tmp1 * njac[i][j-1][k][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2]
- tmp1 * njac[i][j-1][k][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3]
- tmp1 * njac[i][j-1][k][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4]
- tmp1 * njac[i][j-1][k][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0]
- tmp1 * njac[i][j-1][k][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1]
- tmp1 * njac[i][j-1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2]
- tmp1 * njac[i][j-1][k][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3]
- tmp1 * njac[i][j-1][k][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4]
- tmp1 * njac[i][j-1][k][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0]
- tmp1 * njac[i][j-1][k][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1]
- tmp1 * njac[i][j-1][k][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2]
- tmp1 * njac[i][j-1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3]
- tmp1 * njac[i][j-1][k][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4]
- tmp1 * njac[i][j-1][k][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0]
- tmp1 * njac[i][j-1][k][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1]
- tmp1 * njac[i][j-1][k][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2]
- tmp1 * njac[i][j-1][k][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3]
- tmp1 * njac[i][j-1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4]
- tmp1 * njac[i][j-1][k][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0]
- tmp1 * njac[i][j-1][k][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1]
- tmp1 * njac[i][j-1][k][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2]
- tmp1 * njac[i][j-1][k][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3]
- tmp1 * njac[i][j-1][k][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4]
- tmp1 * njac[i][j-1][k][4][4]
- tmp1 * dy5;
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dy1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dy2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dy3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dy4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dy5;
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0]
- tmp1 * njac[i][j+1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1]
- tmp1 * njac[i][j+1][k][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2]
- tmp1 * njac[i][j+1][k][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3]
- tmp1 * njac[i][j+1][k][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4]
- tmp1 * njac[i][j+1][k][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0]
- tmp1 * njac[i][j+1][k][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1]
- tmp1 * njac[i][j+1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2]
- tmp1 * njac[i][j+1][k][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3]
- tmp1 * njac[i][j+1][k][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4]
- tmp1 * njac[i][j+1][k][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0]
- tmp1 * njac[i][j+1][k][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1]
- tmp1 * njac[i][j+1][k][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2]
- tmp1 * njac[i][j+1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3]
- tmp1 * njac[i][j+1][k][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4]
- tmp1 * njac[i][j+1][k][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0]
- tmp1 * njac[i][j+1][k][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1]
- tmp1 * njac[i][j+1][k][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2]
- tmp1 * njac[i][j+1][k][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3]
- tmp1 * njac[i][j+1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4]
- tmp1 * njac[i][j+1][k][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0]
- tmp1 * njac[i][j+1][k][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1]
- tmp1 * njac[i][j+1][k][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2]
- tmp1 * njac[i][j+1][k][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3]
- tmp1 * njac[i][j+1][k][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4]
- tmp1 * njac[i][j+1][k][4][4]
- tmp1 * dy5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c
c Pass 1 fills the 5x5 flux Jacobian fjac and the 5x5 viscous
c Jacobian njac at EVERY k (including the k=0 and k=nz-1 boundary
c planes, which pass 2 reads as k-1 / k+1 neighbors).
c Pass 2 forms the block-tridiagonal coefficients:
c   lhs[..][AA] couples to cell k-1 (sub-diagonal block),
c   lhs[..][BB] is the diagonal block (identity + 2*dt*tz1 terms),
c   lhs[..][CC] couples to cell k+1 (super-diagonal block).
c
c NOTE(review): the bare "#pragma omp for" directives are orphaned,
c so this function must be called from inside an enclosing
c "omp parallel" region.  tmp1/tmp2/tmp3 are not declared locally;
c presumably file-scope and threadprivate -- confirm.
c-------------------------------------------------------------------*/
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the block-diagonal matrix;
c determine c (labeled f) and s jacobians
c---------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 0; k < grid_points[2]; k++) {
/* tmp1..tmp3 are successive powers of 1/rho at this point */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[i][j][k][0][0] = 0.0;
fjac[i][j][k][0][1] = 0.0;
fjac[i][j][k][0][2] = 0.0;
fjac[i][j][k][0][3] = 1.0;
fjac[i][j][k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][1][2] = 0.0;
fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][2][1] = 0.0;
fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 )
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;
fjac[i][j][k][3][3] = ( 2.0 - c2 )
* u[i][j][k][3] * tmp1;
fjac[i][j][k][3][4] = c2;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * ( u[i][j][k][4] * tmp1 ) )
* ( u[i][j][k][3] * tmp1 );
fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ u[i][j][k][2]*u[i][j][k][2]
+ 3.0*u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
/* c3 * c4 here equals c3c4 (c3c4 is defined as c3*c4 in
   set_constants) -- same value, just written out */
njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 )* tmp1;
}
}
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in z direction
c (AA from plane k-1, BB from plane k, CC from plane k+1)
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1/tmp2 reused here as the dt-scaled viscous/flux weights */
tmp1 = dt * tz1;
tmp2 = dt * tz2;
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0]
- tmp1 * njac[i][j][k-1][0][0]
- tmp1 * dz1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1]
- tmp1 * njac[i][j][k-1][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2]
- tmp1 * njac[i][j][k-1][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3]
- tmp1 * njac[i][j][k-1][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4]
- tmp1 * njac[i][j][k-1][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0]
- tmp1 * njac[i][j][k-1][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1]
- tmp1 * njac[i][j][k-1][1][1]
- tmp1 * dz2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2]
- tmp1 * njac[i][j][k-1][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3]
- tmp1 * njac[i][j][k-1][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4]
- tmp1 * njac[i][j][k-1][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0]
- tmp1 * njac[i][j][k-1][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1]
- tmp1 * njac[i][j][k-1][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2]
- tmp1 * njac[i][j][k-1][2][2]
- tmp1 * dz3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3]
- tmp1 * njac[i][j][k-1][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4]
- tmp1 * njac[i][j][k-1][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0]
- tmp1 * njac[i][j][k-1][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1]
- tmp1 * njac[i][j][k-1][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2]
- tmp1 * njac[i][j][k-1][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3]
- tmp1 * njac[i][j][k-1][3][3]
- tmp1 * dz4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4]
- tmp1 * njac[i][j][k-1][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0]
- tmp1 * njac[i][j][k-1][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1]
- tmp1 * njac[i][j][k-1][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2]
- tmp1 * njac[i][j][k-1][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3]
- tmp1 * njac[i][j][k-1][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4]
- tmp1 * njac[i][j][k-1][4][4]
- tmp1 * dz5;
/* diagonal block: identity plus twice the viscous Jacobian
   plus the dz* diagonal damping terms */
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dz1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dz2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dz3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dz4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dz5;
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0]
- tmp1 * njac[i][j][k+1][0][0]
- tmp1 * dz1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1]
- tmp1 * njac[i][j][k+1][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2]
- tmp1 * njac[i][j][k+1][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3]
- tmp1 * njac[i][j][k+1][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4]
- tmp1 * njac[i][j][k+1][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0]
- tmp1 * njac[i][j][k+1][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1]
- tmp1 * njac[i][j][k+1][1][1]
- tmp1 * dz2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2]
- tmp1 * njac[i][j][k+1][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3]
- tmp1 * njac[i][j][k+1][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4]
- tmp1 * njac[i][j][k+1][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0]
- tmp1 * njac[i][j][k+1][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1]
- tmp1 * njac[i][j][k+1][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2]
- tmp1 * njac[i][j][k+1][2][2]
- tmp1 * dz3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3]
- tmp1 * njac[i][j][k+1][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4]
- tmp1 * njac[i][j][k+1][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0]
- tmp1 * njac[i][j][k+1][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1]
- tmp1 * njac[i][j][k+1][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2]
- tmp1 * njac[i][j][k+1][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3]
- tmp1 * njac[i][j][k+1][3][3]
- tmp1 * dz4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4]
- tmp1 * njac[i][j][k+1][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0]
- tmp1 * njac[i][j][k+1][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1]
- tmp1 * njac[i][j][k+1][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2]
- tmp1 * njac[i][j][k+1][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3]
- tmp1 * njac[i][j][k+1][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4]
- tmp1 * njac[i][j][k+1][4][4]
- tmp1 * dz5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void compute_rhs(void) {
/*--------------------------------------------------------------------
c Assemble the right hand side of the implicit system:
c   1) cache per-point velocities, kinetic energy and 1/rho,
c   2) copy the known forcing term into rhs over the whole grid,
c   3) add second-order central-difference fluxes in the xi, eta
c      and zeta directions,
c   4) add fourth-order artificial dissipation in each direction
c      (stencil shrinks to one-sided forms at the boundary planes),
c   5) finally scale the entire rhs by the time step dt.
c
c NOTE(review): the bare "#pragma omp for" directives are orphaned;
c this function must run inside an enclosing "omp parallel" region.
c-------------------------------------------------------------------*/
int i, j, k, m;
double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/
#pragma omp for private(j,k) nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
rho_inv = 1.0/u[i][j][k][0];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[i][j][k][1] * rho_inv;
vs[i][j][k] = u[i][j][k][2] * rho_inv;
ws[i][j][k] = u[i][j][k][3] * rho_inv;
square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] +
u[i][j][k][2]*u[i][j][k][2] +
u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
}
}
}
/*--------------------------------------------------------------------
c copy the exact forcing term to the right hand side; because
c this forcing term is known, we can store it on the whole grid
c including the boundary
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = forcing[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c compute xi-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
uijk = us[i][j][k];
up1 = us[i+1][j][k];
um1 = us[i-1][j][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 *
(u[i+1][j][k][0] - 2.0*u[i][j][k][0] +
u[i-1][j][k][0]) -
tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 *
(u[i+1][j][k][1] - 2.0*u[i][j][k][1] +
u[i-1][j][k][1]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[i+1][j][k][1]*up1 -
u[i-1][j][k][1]*um1 +
(u[i+1][j][k][4]- square[i+1][j][k]-
u[i-1][j][k][4]+ square[i-1][j][k])*
c2);
rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 *
(u[i+1][j][k][2] - 2.0*u[i][j][k][2] +
u[i-1][j][k][2]) +
xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +
vs[i-1][j][k]) -
tx2 * (u[i+1][j][k][2]*up1 -
u[i-1][j][k][2]*um1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 *
(u[i+1][j][k][3] - 2.0*u[i][j][k][3] +
u[i-1][j][k][3]) +
xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +
ws[i-1][j][k]) -
tx2 * (u[i+1][j][k][3]*up1 -
u[i-1][j][k][3]*um1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 *
(u[i+1][j][k][4] - 2.0*u[i][j][k][4] +
u[i-1][j][k][4]) +
xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +
qs[i-1][j][k]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk +
um1*um1) +
xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i-1][j][k][4]*rho_i[i-1][j][k]) -
tx2 * ( (c1*u[i+1][j][k][4] -
c2*square[i+1][j][k])*up1 -
(c1*u[i-1][j][k][4] -
c2*square[i-1][j][k])*um1 );
}
}
}
/*--------------------------------------------------------------------
c add fourth order xi-direction dissipation
c (one-sided stencils on the planes next to the boundary:
c  i=1, i=2, then full 5-point interior, then the last two planes)
c-------------------------------------------------------------------*/
i = 1;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m]);
}
}
}
i = 2;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);
}
}
}
#pragma omp for private(j,k,m) nowait
for (i = 3; i < grid_points[0]-3; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m] );
}
}
}
}
i = grid_points[0]-3;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );
}
}
}
i = grid_points[0]-2;
#pragma omp for private(k,m)
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +
5.0*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute eta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j+1][k];
vm1 = vs[i][j-1][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 *
(u[i][j+1][k][0] - 2.0*u[i][j][k][0] +
u[i][j-1][k][0]) -
ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 *
(u[i][j+1][k][1] - 2.0*u[i][j][k][1] +
u[i][j-1][k][1]) +
yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] +
us[i][j-1][k]) -
ty2 * (u[i][j+1][k][1]*vp1 -
u[i][j-1][k][1]*vm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 *
(u[i][j+1][k][2] - 2.0*u[i][j][k][2] +
u[i][j-1][k][2]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[i][j+1][k][2]*vp1 -
u[i][j-1][k][2]*vm1 +
(u[i][j+1][k][4] - square[i][j+1][k] -
u[i][j-1][k][4] + square[i][j-1][k])
*c2);
rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 *
(u[i][j+1][k][3] - 2.0*u[i][j][k][3] +
u[i][j-1][k][3]) +
yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] +
ws[i][j-1][k]) -
ty2 * (u[i][j+1][k][3]*vp1 -
u[i][j-1][k][3]*vm1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 *
(u[i][j+1][k][4] - 2.0*u[i][j][k][4] +
u[i][j-1][k][4]) +
yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] +
qs[i][j-1][k]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk +
vm1*vm1) +
yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j-1][k][4]*rho_i[i][j-1][k]) -
ty2 * ((c1*u[i][j+1][k][4] -
c2*square[i][j+1][k]) * vp1 -
(c1*u[i][j-1][k][4] -
c2*square[i][j-1][k]) * vm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order eta-direction dissipation
c (same one-sided boundary treatment as the xi direction)
c-------------------------------------------------------------------*/
j = 1;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m]);
}
}
}
j = 2;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);
}
}
}
#pragma omp for private(j,k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 3; j < grid_points[1]-3; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m] );
}
}
}
}
j = grid_points[1]-3;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );
}
}
}
j = grid_points[1]-2;
#pragma omp for private(k,m)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +
5.*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute zeta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k+1];
wm1 = ws[i][j][k-1];
rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 *
(u[i][j][k+1][0] - 2.0*u[i][j][k][0] +
u[i][j][k-1][0]) -
tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 *
(u[i][j][k+1][1] - 2.0*u[i][j][k][1] +
u[i][j][k-1][1]) +
zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] +
us[i][j][k-1]) -
tz2 * (u[i][j][k+1][1]*wp1 -
u[i][j][k-1][1]*wm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 *
(u[i][j][k+1][2] - 2.0*u[i][j][k][2] +
u[i][j][k-1][2]) +
zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] +
vs[i][j][k-1]) -
tz2 * (u[i][j][k+1][2]*wp1 -
u[i][j][k-1][2]*wm1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 *
(u[i][j][k+1][3] - 2.0*u[i][j][k][3] +
u[i][j][k-1][3]) +
zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
tz2 * (u[i][j][k+1][3]*wp1 -
u[i][j][k-1][3]*wm1 +
(u[i][j][k+1][4] - square[i][j][k+1] -
u[i][j][k-1][4] + square[i][j][k-1])
*c2);
rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 *
(u[i][j][k+1][4] - 2.0*u[i][j][k][4] +
u[i][j][k-1][4]) +
zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] +
qs[i][j][k-1]) +
zzcon4 * (wp1*wp1 - 2.0*wijk*wijk +
wm1*wm1) +
zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j][k-1][4]*rho_i[i][j][k-1]) -
tz2 * ( (c1*u[i][j][k+1][4] -
c2*square[i][j][k+1])*wp1 -
(c1*u[i][j][k-1][4] -
c2*square[i][j][k-1])*wm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order zeta-direction dissipation
c (same one-sided boundary treatment as the xi direction)
c-------------------------------------------------------------------*/
k = 1;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m]);
}
}
}
k = 2;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);
}
}
}
#pragma omp for private(j,k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 3; k < grid_points[2]-3; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m] );
}
}
}
}
k = grid_points[2]-3;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );
}
}
}
k = grid_points[2]-2;
#pragma omp for private(j,m)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
5.0*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c finally, scale the whole interior rhs by the time step dt
c-------------------------------------------------------------------*/
#pragma omp for private(k,m,i)
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
for (i = 1; i < grid_points[0]-1; i++) {
rhs[i][j][k][m] = rhs[i][j][k][m] * dt;
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
c Initialize the exact-solution coefficient table ce and every
c derived scalar constant used by the solver.  The scalar
c assignments below are ordered so that each derived constant is
c computed only after the constants it depends on.
c-------------------------------------------------------------------*/
  /* coefficients of the exact solution, one row per equation */
  static const double ce_init[5][13] = {
    { 2.0, 0.0, 0.0, 4.0, 5.0, 3.0, 0.5, 0.02, 0.01, 0.03, 0.5, 0.4, 0.3 },
    { 1.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 0.01, 0.03, 0.02, 0.4, 0.3, 0.5 },
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0, 0.04, 0.03, 0.05, 0.3, 0.5, 0.4 },
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0, 0.03, 0.05, 0.04, 0.2, 0.1, 0.3 },
    { 5.0, 4.0, 3.0, 2.0, 0.1, 0.4, 0.3, 0.05, 0.04, 0.03, 0.1, 0.3, 0.2 }
  };
  int row, col;
  for (row = 0; row < 5; row++) {
    for (col = 0; col < 13; col++) {
      ce[row][col] = ce_init[row][col];
    }
  }
  /* basic gas / scheme constants */
  c1 = 1.4;
  c2 = 0.4;
  c3 = 0.1;
  c4 = 1.0;
  c5 = 1.4;
  /* inverse mesh spacings */
  dnxm1 = 1.0 / (double)(grid_points[0]-1);
  dnym1 = 1.0 / (double)(grid_points[1]-1);
  dnzm1 = 1.0 / (double)(grid_points[2]-1);
  /* products of the basic constants */
  c1c2 = c1 * c2;
  c1c5 = c1 * c5;
  c3c4 = c3 * c4;
  c1345 = c1c5 * c3c4;
  conz1 = 1.0 - c1c5;
  /* first/second difference factors per direction */
  tx1 = 1.0 / (dnxm1 * dnxm1);
  tx2 = 1.0 / (2.0 * dnxm1);
  tx3 = 1.0 / dnxm1;
  ty1 = 1.0 / (dnym1 * dnym1);
  ty2 = 1.0 / (2.0 * dnym1);
  ty3 = 1.0 / dnym1;
  tz1 = 1.0 / (dnzm1 * dnzm1);
  tz2 = 1.0 / (2.0 * dnzm1);
  tz3 = 1.0 / dnzm1;
  /* diffusion coefficients: uniform per direction */
  dx1 = dx2 = dx3 = dx4 = dx5 = 0.75;
  dy1 = dy2 = dy3 = dy4 = dy5 = 0.75;
  dz1 = dz2 = dz3 = dz4 = dz5 = 1.0;
  dxmax = max(dx3, dx4);
  dymax = max(dy2, dy4);
  dzmax = max(dz2, dz3);
  /* artificial dissipation strength */
  dssp = 0.25 * max(dx1, max(dy1, dz1) );
  c4dssp = 4.0 * dssp;
  c5dssp = 5.0 * dssp;
  /* dt-scaled difference factors */
  dttx1 = dt*tx1;
  dttx2 = dt*tx2;
  dtty1 = dt*ty1;
  dtty2 = dt*ty2;
  dttz1 = dt*tz1;
  dttz2 = dt*tz2;
  c2dttx1 = 2.0*dttx1;
  c2dtty1 = 2.0*dtty1;
  c2dttz1 = 2.0*dttz1;
  dtdssp = dt*dssp;
  comz1 = dtdssp;
  comz4 = 4.0*dtdssp;
  comz5 = 5.0*dtdssp;
  comz6 = 6.0*dtdssp;
  c3c4tx3 = c3c4*tx3;
  c3c4ty3 = c3c4*ty3;
  c3c4tz3 = c3c4*tz3;
  dx1tx1 = dx1*tx1;
  dx2tx1 = dx2*tx1;
  dx3tx1 = dx3*tx1;
  dx4tx1 = dx4*tx1;
  dx5tx1 = dx5*tx1;
  dy1ty1 = dy1*ty1;
  dy2ty1 = dy2*ty1;
  dy3ty1 = dy3*ty1;
  dy4ty1 = dy4*ty1;
  dy5ty1 = dy5*ty1;
  dz1tz1 = dz1*tz1;
  dz2tz1 = dz2*tz1;
  dz3tz1 = dz3*tz1;
  dz4tz1 = dz4*tz1;
  dz5tz1 = dz5*tz1;
  c2iv = 2.5;
  con43 = 4.0/3.0;
  con16 = 1.0/6.0;
  /* per-direction viscous-term coefficients */
  xxcon1 = c3c4tx3*con43*tx3;
  xxcon2 = c3c4tx3*tx3;
  xxcon3 = c3c4tx3*conz1*tx3;
  xxcon4 = c3c4tx3*con16*tx3;
  xxcon5 = c3c4tx3*c1c5*tx3;
  yycon1 = c3c4ty3*con43*ty3;
  yycon2 = c3c4ty3*ty3;
  yycon3 = c3c4ty3*conz1*ty3;
  yycon4 = c3c4ty3*con16*ty3;
  yycon5 = c3c4ty3*c1c5*ty3;
  zzcon1 = c3c4tz3*con43*tz3;
  zzcon2 = c3c4tz3*tz3;
  zzcon3 = c3c4tz3*conz1*tz3;
  zzcon4 = c3c4tz3*con16*tz3;
  zzcon5 = c3c4tz3*c1c5*tz3;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *cclass, boolean *verified) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c verification routine
c
c Computes the RMS norms of the solution error and the residual,
c matches (grid size, no_time_steps) against the known problem
c classes, and compares the norms against the per-class reference
c values within epsilon.  On return *cclass is the detected class
c ('U' if unknown or if dt mismatched) and *verified is TRUE only
c if every norm and dt matched its reference.
c-------------------------------------------------------------------*/
double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
epsilon, xce[5], xcr[5], dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level
c-------------------------------------------------------------------*/
epsilon = 1.0e-08;
/*--------------------------------------------------------------------
c compute the error norm and the residual norm, and exit if not printing
c-------------------------------------------------------------------*/
error_norm(xce);
compute_rhs();
rhs_norm(xcr);
/* compute_rhs scales rhs by dt; undo that for the residual norm */
for (m = 0; m < 5; m++) {
xcr[m] = xcr[m] / dt;
}
*cclass = 'U';
*verified = TRUE;
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
/*--------------------------------------------------------------------
c reference data for 12X12X12 grids after 60 time steps, with DT = 1.0d-02
c-------------------------------------------------------------------*/
if (grid_points[0] == 12 &&
grid_points[1] == 12 &&
grid_points[2] == 12 &&
no_time_steps == 60) {
*cclass = 'S';
dtref = 1.0e-2;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.7034283709541311e-01;
xcrref[1] = 1.2975252070034097e-02;
xcrref[2] = 3.2527926989486055e-02;
xcrref[3] = 2.6436421275166801e-02;
xcrref[4] = 1.9211784131744430e-01;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 4.9976913345811579e-04;
xceref[1] = 4.5195666782961927e-05;
xceref[2] = 7.3973765172921357e-05;
xceref[3] = 7.3821238632439731e-05;
xceref[4] = 8.9269630987491446e-04;
/*--------------------------------------------------------------------
c reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 24 &&
grid_points[1] == 24 &&
grid_points[2] == 24 &&
no_time_steps == 200) {
*cclass = 'W';
dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 0.1125590409344e+03;
xcrref[1] = 0.1180007595731e+02;
xcrref[2] = 0.2710329767846e+02;
xcrref[3] = 0.2469174937669e+02;
xcrref[4] = 0.2638427874317e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 0.4419655736008e+01;
xceref[1] = 0.4638531260002e+00;
xceref[2] = 0.1011551749967e+01;
xceref[3] = 0.9235878729944e+00;
xceref[4] = 0.1018045837718e+02;
/*--------------------------------------------------------------------
c reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 64 &&
grid_points[1] == 64 &&
grid_points[2] == 64 &&
no_time_steps == 200) {
*cclass = 'A';
dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.0806346714637264e+02;
xcrref[1] = 1.1319730901220813e+01;
xcrref[2] = 2.5974354511582465e+01;
xcrref[3] = 2.3665622544678910e+01;
xcrref[4] = 2.5278963211748344e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 4.2348416040525025e+00;
xceref[1] = 4.4390282496995698e-01;
xceref[2] = 9.6692480136345650e-01;
xceref[3] = 8.8302063039765474e-01;
xceref[4] = 9.7379901770829278e+00;
/*--------------------------------------------------------------------
c reference data for 102X102X102 grids after 200 time steps,
c with DT = 3.0d-04
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 102 &&
grid_points[1] == 102 &&
grid_points[2] == 102 &&
no_time_steps == 200) {
*cclass = 'B';
dtref = 3.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.4233597229287254e+03;
xcrref[1] = 9.9330522590150238e+01;
xcrref[2] = 3.5646025644535285e+02;
xcrref[3] = 3.2485447959084092e+02;
xcrref[4] = 3.2707541254659363e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 5.2969847140936856e+01;
xceref[1] = 4.4632896115670668e+00;
xceref[2] = 1.3122573342210174e+01;
xceref[3] = 1.2006925323559144e+01;
xceref[4] = 1.2459576151035986e+02;
/*--------------------------------------------------------------------
c reference data for 162X162X162 grids after 200 time steps,
c with DT = 1.0d-04
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 162 &&
grid_points[1] == 162 &&
grid_points[2] == 162 &&
no_time_steps == 200) {
*cclass = 'C';
dtref = 1.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 0.62398116551764615e+04;
xcrref[1] = 0.50793239190423964e+03;
xcrref[2] = 0.15423530093013596e+04;
xcrref[3] = 0.13302387929291190e+04;
xcrref[4] = 0.11604087428436455e+05;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 0.16462008369091265e+03;
xceref[1] = 0.11497107903824313e+02;
xceref[2] = 0.41207446207461508e+02;
xceref[3] = 0.37087651059694167e+02;
xceref[4] = 0.36211053051841265e+03;
} else {
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 64X64X64 or 102X102X102 or 162X162X162
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
c (relative differences; for the unknown class the 1.0 defaults above
c make these harmless but unused)
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
c-------------------------------------------------------------------*/
if (*cclass != 'U') {
printf(" Verification being performed for class %1c\n", *cclass);
printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*cclass = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown class\n");
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d%20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m, xcr[m], xcrref[m], xcrdif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m, xcr[m], xcrref[m], xcrdif[m]);
}
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d%20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m, xce[m], xceref[m], xcedif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m, xce[m], xceref[m], xcedif[m]);
}
}
if (*cclass == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified == TRUE) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
/*--------------------------------------------------------------------
c Performs line solves in X direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c
c NOTE(review): the callees contain orphaned "#pragma omp for"
c worksharing constructs, so this routine is presumably invoked from
c inside an enclosing OpenMP parallel region -- confirm at call site.
c-------------------------------------------------------------------*/
  lhsx();              /* assemble the 5x5 block-tridiagonal system for x lines */
  x_solve_cell();      /* forward elimination over the blocks */
  x_backsubstitute();  /* back substitution to recover the solution */
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_backsubstitute(void) {
/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(isize)=rhs(isize)
c else assume U(isize) is loaded in un pack backsub_info
c so just use it
c after call u(istart) will be sent to next cell
c
c Sweeps backwards in i: each plane i uses the already-solved plane
c i+1 via rhs(i) -= C(i)*rhs(i+1).  The i loop is sequential (data
c dependence); only the independent (j,k) work is workshared.
c-------------------------------------------------------------------*/
  int i, j, k, m, n;
  for (i = grid_points[0]-2; i >= 0; i--) {
    /* All writes at a fixed i touch distinct (j,k) entries, so the
       j loop is safe to distribute across the team. */
#pragma omp for private(k,m,n)
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          for (n = 0; n < BLOCK_SIZE; n++) {
            rhs[i][j][k][m] = rhs[i][j][k][m]
              - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];
          }
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve_cell(void) {
/*--------------------------------------------------------------------
c performs Gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(IMAX) and rhs'(IMAX) will be sent to next cell
c-------------------------------------------------------------------*/
  int i,j,k,isize;
  isize = grid_points[0]-1;   /* index of the last plane in i */
/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
#pragma omp for private(k)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c multiply c(0,j,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
      binvcrhs( lhs[0][j][k][BB],
                lhs[0][j][k][CC],
                rhs[0][j][k] );
    }
  }
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (i = 1; i < isize; i++) {
#pragma omp for private(k)
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c rhs(i) = rhs(i) - A*rhs(i-1)
c-------------------------------------------------------------------*/
        matvec_sub(lhs[i][j][k][AA],
                   rhs[i-1][j][k], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(i) = B(i) - C(i-1)*A(i)
c-------------------------------------------------------------------*/
        matmul_sub(lhs[i][j][k][AA],
                   lhs[i-1][j][k][CC],
                   lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs
c-------------------------------------------------------------------*/
        binvcrhs( lhs[i][j][k][BB],
                  lhs[i][j][k][CC],
                  rhs[i][j][k] );
      }
    }
  }
/*--------------------------------------------------------------------
c Now finish up special cases for last plane (i == isize)
c-------------------------------------------------------------------*/
#pragma omp for private(k)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c rhs(isize) = rhs(isize) - A*rhs(isize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[isize][j][k][AA],
                 rhs[isize-1][j][k], rhs[isize][j][k]);
/*--------------------------------------------------------------------
c B(isize) = B(isize) - C(isize-1)*A(isize)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[isize][j][k][AA],
                 lhs[isize-1][j][k][CC],
                 lhs[isize][j][k][BB]);
/*--------------------------------------------------------------------
c multiply rhs() by b_inverse() and copy to rhs
c
c BUGFIX(review): the original indexed with the stale loop variable
c "i" here; it only worked because i == isize after the loop above.
c Use isize explicitly, matching y_solve_cell/z_solve_cell.
c-------------------------------------------------------------------*/
      binvrhs( lhs[isize][j][k][BB],
               rhs[isize][j][k] );
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) {
/*--------------------------------------------------------------------
c Computes bvec := bvec - ablock*avec for one 5x5 block and 5-vector.
c The inner loop subtracts the products column by column, which is the
c same left-to-right accumulation order as the unrolled original, so
c floating-point results are bit-identical.
c-------------------------------------------------------------------*/
  int row, col;
  for (row = 0; row < 5; row++) {
    for (col = 0; col < 5; col++) {
      bvec[row] = bvec[row] - ablock[row][col]*avec[col];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matmul_sub(double ablock[5][5], double bblock[5][5],
                       double cblock[5][5]) {
/*--------------------------------------------------------------------
c Computes cblock := cblock - ablock*bblock for 5x5 blocks.
c For each element the five products are subtracted in index order
c k = 0..4, which reproduces the exact accumulation order of the
c fully unrolled original, so floating-point results match bit for bit.
c-------------------------------------------------------------------*/
  int row, col, k;
  for (col = 0; col < 5; col++) {
    for (row = 0; row < 5; row++) {
      for (k = 0; k < 5; k++) {
        cblock[row][col] = cblock[row][col] - ablock[row][k]*bblock[k][col];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {
/*--------------------------------------------------------------------
c Gauss-Jordan elimination (no pivot search) on the 5x5 diagonal
c block "lhs", applying the identical row operations to the coupling
c block "c" and the right-hand side "r".  On return, effectively
c c := lhs^{-1} c and r := lhs^{-1} r.
c
c As in the fully unrolled reference version, entries of lhs in the
c pivot column and below/left of it are left stale (only columns to
c the right of each pivot are updated), and rows are processed in the
c same order (pivot row first, then rows 0..4 skipping the pivot), so
c the floating-point operation sequence is preserved exactly.
c-------------------------------------------------------------------*/
  int p, q, n;
  double pivot, coeff;

  for (p = 0; p < 5; p++) {
    /* scale the pivot row: lhs entries right of the diagonal, the
       whole row of c, and the rhs entry */
    pivot = 1.00/lhs[p][p];
    for (n = p+1; n < 5; n++)
      lhs[p][n] = lhs[p][n]*pivot;
    for (n = 0; n < 5; n++)
      c[p][n] = c[p][n]*pivot;
    r[p] = r[p]*pivot;

    /* eliminate column p from every other row */
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[q][p];
      for (n = p+1; n < 5; n++)
        lhs[q][n] = lhs[q][n] - coeff*lhs[p][n];
      for (n = 0; n < 5; n++)
        c[q][n] = c[q][n] - coeff*c[p][n];
      r[q] = r[q] - coeff*r[p];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvrhs( double lhs[5][5], double r[5] ) {
/*--------------------------------------------------------------------
c Gauss-Jordan elimination (no pivot search) on the 5x5 block "lhs",
c applying the same row operations to the right-hand side "r".  On
c return, effectively r := lhs^{-1} r.  Used for the final plane of a
c sweep, where there is no coupling block C to transform.
c
c Matches the unrolled reference version exactly: only lhs columns to
c the right of each pivot are updated (the rest go stale), and rows
c are eliminated in order 0..4 skipping the pivot row, preserving the
c floating-point operation sequence bit for bit.
c-------------------------------------------------------------------*/
  int p, q, n;
  double pivot, coeff;

  for (p = 0; p < 5; p++) {
    /* scale the pivot row and its rhs entry */
    pivot = 1.00/lhs[p][p];
    for (n = p+1; n < 5; n++)
      lhs[p][n] = lhs[p][n]*pivot;
    r[p] = r[p]*pivot;

    /* eliminate column p from every other row */
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[q][p];
      for (n = p+1; n < 5; n++)
        lhs[q][n] = lhs[q][n] - coeff*lhs[p][n];
      r[q] = r[q] - coeff*r[p];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
/*--------------------------------------------------------------------
c Performs line solves in Y direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c
c NOTE(review): the callees contain orphaned "#pragma omp for"
c worksharing constructs, so this routine is presumably invoked from
c inside an enclosing OpenMP parallel region -- confirm at call site.
c-------------------------------------------------------------------*/
  lhsy();              /* assemble the 5x5 block-tridiagonal system for y lines */
  y_solve_cell();      /* forward elimination over the blocks */
  y_backsubstitute();  /* back substitution to recover the solution */
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_backsubstitute(void) {
/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(jsize)=rhs(jsize)
c else assume U(jsize) is loaded in un pack backsub_info
c so just use it
c after call u(jstart) will be sent to next cell
c
c Sweeps backwards in j: each plane j uses the already-solved plane
c j+1 via rhs(j) -= C(j)*rhs(j+1).  The j loop is sequential (data
c dependence); only the independent (i,k) work is workshared.
c-------------------------------------------------------------------*/
  int i, j, k, m, n;
  for (j = grid_points[1]-2; j >= 0; j--) {
    /* All writes at a fixed j touch distinct (i,k) entries, so the
       i loop is safe to distribute across the team. */
#pragma omp for private(k,m,n)
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          for (n = 0; n < BLOCK_SIZE; n++) {
            rhs[i][j][k][m] = rhs[i][j][k][m]
              - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];
          }
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve_cell(void) {
/*--------------------------------------------------------------------
c performs Gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(JMAX) and rhs'(JMAX) will be sent to next cell
c
c The j sweep is sequential (block forward elimination); the (i,k)
c work inside each j plane is independent and is workshared.
c-------------------------------------------------------------------*/
  int i, j, k, jsize;
  jsize = grid_points[1]-1;   /* index of the last plane in j */
  /* First plane (j == 0): invert the diagonal block in place. */
#pragma omp for private(k)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c multiply c(i,0,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
      binvcrhs( lhs[i][0][k][BB],
                lhs[i][0][k][CC],
                rhs[i][0][k] );
    }
  }
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (j = 1; j < jsize; j++) {
#pragma omp for private(k)
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c subtract A*lhs_vector(j-1) from lhs_vector(j)
c
c rhs(j) = rhs(j) - A*rhs(j-1)
c-------------------------------------------------------------------*/
        matvec_sub(lhs[i][j][k][AA],
                   rhs[i][j-1][k], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(j) = B(j) - C(j-1)*A(j)
c-------------------------------------------------------------------*/
        matmul_sub(lhs[i][j][k][AA],
                   lhs[i][j-1][k][CC],
                   lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,1,k) by b_inverse(i,1,k) and copy to rhs
c-------------------------------------------------------------------*/
        binvcrhs( lhs[i][j][k][BB],
                  lhs[i][j][k][CC],
                  rhs[i][j][k] );
      }
    }
  }
  /* Last plane (j == jsize): no coupling block remains, so finish
     with binvrhs instead of binvcrhs. */
#pragma omp for private(k)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[i][jsize][k][AA],
                 rhs[i][jsize-1][k], rhs[i][jsize][k]);
/*--------------------------------------------------------------------
c B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
c call matmul_sub(aa,i,jsize,k,c,
c $ cc,i,jsize-1,k,c,BB,i,jsize,k)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[i][jsize][k][AA],
                 lhs[i][jsize-1][k][CC],
                 lhs[i][jsize][k][BB]);
/*--------------------------------------------------------------------
c multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
c-------------------------------------------------------------------*/
      binvrhs( lhs[i][jsize][k][BB],
               rhs[i][jsize][k] );
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve(void) {
/*--------------------------------------------------------------------
c Performs line solves in Z direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c
c NOTE(review): the callees contain orphaned "#pragma omp for"
c worksharing constructs, so this routine is presumably invoked from
c inside an enclosing OpenMP parallel region -- confirm at call site.
c-------------------------------------------------------------------*/
  lhsz();              /* assemble the 5x5 block-tridiagonal system for z lines */
  z_solve_cell();      /* forward elimination over the blocks */
  z_backsubstitute();  /* back substitution to recover the solution */
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_backsubstitute(void) {
/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(ksize)=rhs(ksize)
c else assume U(ksize) is loaded in un pack backsub_info
c so just use it
c after call u(kstart) will be sent to next cell
c
c Unlike x/y, the sequential dependence is on the innermost index k,
c so the whole (i,j) iteration space is workshared and each thread
c runs its own backward k sweep: rhs(k) -= C(k)*rhs(k+1).
c-------------------------------------------------------------------*/
  int i, j, k, m, n;
#pragma omp for private(j,k,m,n)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = grid_points[2]-2; k >= 0; k--) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          for (n = 0; n < BLOCK_SIZE; n++) {
            rhs[i][j][k][m] = rhs[i][j][k][m]
              - lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];
          }
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve_cell(void) {
/*--------------------------------------------------------------------
c performs Gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(KMAX) and rhs'(KMAX) will be sent to next cell.
c
c The k sweep is sequential (block forward elimination); the (i,j)
c work inside each k plane is independent and is workshared.
c-------------------------------------------------------------------*/
  int i,j,k,ksize;
  ksize = grid_points[2]-1;   /* index of the last plane in k */
/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
  /* First plane (k == 0): invert the diagonal block in place. */
#pragma omp for private(j)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c multiply c(i,j,0) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
      binvcrhs( lhs[i][j][0][BB],
                lhs[i][j][0][CC],
                rhs[i][j][0] );
    }
  }
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (k = 1; k < ksize; k++) {
#pragma omp for private(j)
    for (i = 1; i < grid_points[0]-1; i++) {
      for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c subtract A*lhs_vector(k-1) from lhs_vector(k)
c
c rhs(k) = rhs(k) - A*rhs(k-1)
c-------------------------------------------------------------------*/
        matvec_sub(lhs[i][j][k][AA],
                   rhs[i][j][k-1], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(k) = B(k) - C(k-1)*A(k)
c call matmul_sub(aa,i,j,k,c,cc,i,j,k-1,c,BB,i,j,k)
c-------------------------------------------------------------------*/
        matmul_sub(lhs[i][j][k][AA],
                   lhs[i][j][k-1][CC],
                   lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,j,1) by b_inverse(i,j,1) and copy to rhs
c-------------------------------------------------------------------*/
        binvcrhs( lhs[i][j][k][BB],
                  lhs[i][j][k][CC],
                  rhs[i][j][k] );
      }
    }
  }
/*--------------------------------------------------------------------
c Now finish up special cases for last cell
c (no coupling block remains, so use binvrhs instead of binvcrhs)
c-------------------------------------------------------------------*/
#pragma omp for private(j)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[i][j][ksize][AA],
                 rhs[i][j][ksize-1], rhs[i][j][ksize]);
/*--------------------------------------------------------------------
c B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
c call matmul_sub(aa,i,j,ksize,c,
c $ cc,i,j,ksize-1,c,BB,i,j,ksize)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[i][j][ksize][AA],
                 lhs[i][j][ksize-1][CC],
                 lhs[i][j][ksize][BB]);
/*--------------------------------------------------------------------
c multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
c-------------------------------------------------------------------*/
      binvrhs( lhs[i][j][ksize][BB],
               rhs[i][j][ksize] );
    }
  }
}
/* cat ./common/c_print_results.c */
/*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
/*
 * Print the standard NPB results banner: problem size, timing, Mop/s,
 * verification status, and the build configuration strings.
 * Output only; all arguments are read, none are modified.
 */
void c_print_results( char *name,
                      char cclass,
                      int n1,
                      int n2,
                      int n3,
                      int niter,
                      int nthreads,
                      double t,
                      double mops,
                      char *optype,
                      int passed_verification,
                      char *npbversion,
                      char *compiletime,
                      char *cc,
                      char *clink,
                      char *c_lib,
                      char *c_inc,
                      char *cflags,
                      char *clinkflags,
                      char *rand)
{
    char *evalue="1000";   /* fallback shown if MP_SET_NUMTHREADS is unset */
    printf( "\n\n %s Benchmark Completed\n", name );
    printf( " Class           =                        %c\n", cclass );
    if( n2 == 0 && n3 == 0 )
        printf( " Size            =             %12d\n", n1 );   /* as in IS */
    else
        printf( " Size            =              %3dx%3dx%3d\n", n1,n2,n3 );
    printf( " Iterations      =             %12d\n", niter );
    printf( " Threads         =             %12d\n", nthreads );
    printf( " Time in seconds =             %12.2f\n", t );
    printf( " Mop/s total     =             %12.2f\n", mops );
    printf( " Operation type  = %24s\n", optype);
    if( passed_verification )
        printf( " Verification    =               SUCCESSFUL\n" );
    else
        printf( " Verification    =             UNSUCCESSFUL\n" );
    printf( " Version         =             %12s\n", npbversion );
    printf( " Compile date    =             %12s\n", compiletime );
    printf( "\n Compile options:\n" );
    printf( "    CC           = %s\n", cc );
    printf( "    CLINK        = %s\n", clink );
    printf( "    C_LIB        = %s\n", c_lib );
    printf( "    C_INC        = %s\n", c_inc );
    printf( "    CFLAGS       = %s\n", cflags );
    printf( "    CLINKFLAGS   = %s\n", clinkflags );
    printf( "    RAND         = %s\n", rand );
#ifdef SMP
    {
        /* BUGFIX(review): getenv() may return NULL; passing NULL to
           printf("%s") is undefined behavior.  Keep the default string
           when the variable is unset. */
        char *ev = getenv("MP_SET_NUMTHREADS");
        if (ev != NULL) evalue = ev;
    }
    printf( " MULTICPUS = %s\n", evalue );
#endif
/* printf( "\n\n" );
   printf( " Please send the results of this run to:\n\n" );
   printf( " NPB Development Team\n" );
   printf( " Internet: npb@nas.nasa.gov\n \n" );
   printf( " If email is not available, send this to:\n\n" );
   printf( " MS T27A-1\n" );
   printf( " NASA Ames Research Center\n" );
   printf( " Moffett Field, CA 94035-1000\n\n" );
   printf( " Fax: 415-604-3957\n\n" );*/
}
/*
cat ./common/c_timers.c
*/
/*
#include "wtime.h"
#if defined(IBM)
#define wtime wtime
#elif defined(CRAY)
#define wtime WTIME
#else
#define wtime wtime_
#endif
*/
/* Prototype */
void wtime( double * );
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/* Return the current wall-clock reading from wtime(), in seconds. */
double elapsed_time( void )
{
    double now;
    wtime( &now );
    return now;
}
double start[64], elapsed[64];
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
void timer_clear( int n )
{
    /* Reset the accumulated time for timer slot n (valid slots: 0..63,
       per the start[]/elapsed[] array sizes). */
    elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
void timer_start( int n )
{
    /* Record the current wall-clock time as the start point of timer n. */
    start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
void timer_stop( int n )
{
    /* Stop timer slot n: add the interval since the matching
       timer_start(n) call to its accumulated total. */
    elapsed[n] += elapsed_time() - start[n];
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
double timer_read( int n )
{
    /* Return the total accumulated seconds for timer slot n. */
    return( elapsed[n] );
}
void wtime(double *t)
{
  /* Wall-clock time in seconds relative to the first call, with
     microsecond resolution (gettimeofday).  The first call latches the
     current whole-second count so later results stay small and keep
     full double precision. */
  static int base_sec = -1;  /* NOTE(review): int narrows time_t here;
                                tolerable only because it stores a
                                recent baseline -- confirm for post-2038 use */
  struct timeval tv;

  gettimeofday(&tv, (void *)0);
  if (base_sec < 0)
    base_sec = tv.tv_sec;
  *t = (tv.tv_sec - base_sec) + 1.0e-6*tv.tv_usec;
}
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/shear.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /*
    Reorder the user-visible affine arguments sx,ry,rx,sy,tx,ty into the
    internal coefficient order c0,c2,c4,c1,c3,c5.  Slots 0 and 5 are
    already in place; the middle four are a fixed permutation.
  */
  const double ry = affine[1];
  const double rx = affine[2];
  const double sy = affine[3];
  const double tx = affine[4];

  affine[1] = rx;
  affine[2] = tx;
  affine[3] = ry;
  affine[4] = sy;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /*
    Inverse of AffineArgsToCoefficients(): reorder the internal
    coefficients c0,c1,c2,c3,c4,c5 back into the user-visible affine
    argument order sx,ry,rx,sy,tx,ty.  Slots 0 and 5 stay put.
  */
  const double c1 = coeff[1];
  const double c2 = coeff[2];
  const double c3 = coeff[3];
  const double c4 = coeff[4];

  coeff[1] = c3;
  coeff[2] = c1;
  coeff[3] = c4;
  coeff[4] = c2;
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 50 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
inverse[0]=determinant*coeff[4];
inverse[1]=determinant*(-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
inverse[3]=determinant*(-coeff[3]);
inverse[4]=determinant*coeff[0];
inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 53 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
 * the 2 dimensional polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /*
    Return the number of terms in a 2d polynomial of the given 'order'.
    Valid orders are the integers 1..5, plus the special value 1.5 which
    selects the 4-term bilinear form (see the table above).  Any other
    order is invalid and yields 0.
    FIX(review): the original test used (order-1.5) > MagickEpsilon,
    which wrongly accepted ANY non-integer order below ~1.5 (e.g. 1.2);
    fabs() restricts the non-integer case to 1.5 only.
  */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /*
    Return the value of polynomial basis term 'n' at (x,y).
    Term n is x^xpow[n] * y^ypow[n]; the ordering of the 21 supported
    terms (constant through quintic, with the bilinear x*y term slotted
    in at n=3) matches poly_basis_str() and the table in the comment
    above.  Out-of-range n yields 0 (should never happen).
  */
  static const int xpow[21] =
    { 0, 1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 };
  static const int ypow[21] =
    { 0, 0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };
  double value;
  int k;

  if ((n < 0) || (n > 20))
    return( 0.0 );
  value=1.0;
  for (k=0; k < xpow[n]; k++)
    value*=x;
  for (k=0; k < ypow[n]; k++)
    value*=y;
  return( value );
}
static const char *poly_basis_str(ssize_t n)
{
  /*
    Return the symbolic factor string for polynomial basis term 'n',
    used when printing the fitted polynomial ("ii" stands for x, "jj"
    for y).  Term ordering matches poly_basis_fn(); any index outside
    0..20 reports "UNKNOWN" (should never happen).
  */
  static const char *factor[21] =
    {
      "",                                                  /* constant  */
      "*ii","*jj",                                         /* affine    */
      "*ii*jj",                                            /* bilinear  */
      "*ii*ii","*jj*jj",                                   /* quadratic */
      "*ii*ii*ii","*ii*ii*jj","*ii*jj*jj","*jj*jj*jj",     /* cubic     */
      "*ii*ii*ii*ii","*ii*ii*ii*jj","*ii*ii*jj*jj",
      "*ii*jj*jj*jj","*jj*jj*jj*jj",                       /* quartic   */
      "*ii*ii*ii*ii*ii","*ii*ii*ii*ii*jj","*ii*ii*ii*jj*jj",
      "*ii*ii*jj*jj*jj","*ii*jj*jj*jj*jj","*jj*jj*jj*jj*jj" /* quintic  */
    };

  if ((n < 0) || (n > 20))
    return( "UNKNOWN" );
  return( factor[n] );
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /*
    Return the x-partial derivative of basis term 'n' with the integer
    multiplier dropped: d/dx of x^a * y^b is reported as x^(a-1)*y^b,
    not a*x^(a-1)*y^b (e.g. term 4, x*x, yields x rather than 2x --
    the callers only need the relative scale).  Terms with no x factor,
    and out-of-range n, give 0.
  */
  static const int xpow[21] =
    { 0, 1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 };
  static const int ypow[21] =
    { 0, 0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };
  double value;
  int k;

  if ((n < 0) || (n > 20) || (xpow[n] == 0))
    return( 0.0 );
  value=1.0;
  for (k=1; k < xpow[n]; k++)   /* one power of x removed */
    value*=x;
  for (k=0; k < ypow[n]; k++)
    value*=y;
  return( value );
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /*
    Return the y-partial derivative of basis term 'n' (with the integer
    multiplier dropped, as in poly_basis_dx).  For every term past the
    quadratic group the y-derivative of term n equals the x-derivative
    of term n-1 -- a quirk of the term ordering, which was rearranged
    only in the low terms to accommodate the bilinear (order 1.5) form.
  */
  if (n == 0 || n == 1 || n == 4)
    return( 0.0 );                     /* no y factor in these terms */
  if (n == 2)
    return( 1.0 );                     /* d/dy of y */
  if (n == 3)
    return( x );                       /* d/dy of x*y */
  if (n == 5)
    return( y );                       /* d/dy of y*y (multiplier dropped) */
  return( poly_basis_dx(n-1,x,y) );    /* weird but true */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image: repackage the AffineMatrix as the 6-value
    argument list expected by DistortImage() and let it do the work.
    The matrix is a *forward* mapping, hence AffineProjectionDistortion,
    whose coefficient generator returns the inverse form (see
    GenerateCoefficients).
  */
  assert(image != (const Image *) NULL);  /* FIX(review): image was
    dereferenced below without a NULL assertion, unlike the other
    parameters */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* pack the matrix into sx,rx,ry,sy,tx,ty argument order */
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this, this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortMethod *method,const size_t number_arguments,const double *arguments,
size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usally 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordients too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in there inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2 number_valuesal Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separatable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaniously
+ Will only work with a 2 number_valuesal Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image cocodinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algbra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: techniqually coefficient c5 is not needed, anymore,
but kept for completness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficents are used to hole global polynomal information
c0 = Order of the polynimial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bootom edge to this radius (radial scaling)
By default, if the radii arguments are nor provided the image radius
is calculated so the horizontal center-line is fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 is the sanatized version first 6 input args
Coefficient 6 is the angle to coord ratio and visa-versa
Coefficient 7 is the radius to coord ratio and visa-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usally 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* converstion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a clinder and flat plain from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficents 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficent values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coodinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coodinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficents!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using a EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without one.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  /*
    Build an affine scaling distortion from two control-point pairs per axis:
    (0,0)->(0,0) and (source size)->(requested size).  sizeof the array (not
    a repeated magic count) keeps the memset in sync with the declaration.
  */
  (void) ResetMagickMemory(distort_args,0,sizeof(distort_args));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no transparency channel, so we are free to use it.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
    }
  else
    {
      /*
        Image has transparency, so handle colors and alpha separately.
        Basically we need to separate the virtual-pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.

        Distort the alpha channel separately.
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          /* was leaked here when the clone failed */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resized image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the distortion: crop the (deliberately
    over-sized) distorted image to exactly the requested geometry and
    strip the page offsets introduced by the distortion.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
% distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image, DistortMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
register ssize_t
i;
char image_gen[MagickPathExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method) {
case AffineDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case PerspectiveDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr, "Perspective Projection:\n");
(void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '");
for (i=0; i<4; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for (; i<7; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n",
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n",
coeff[8] < 0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
(void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
0.5-coeff[3], 0.5-coeff[7]);
(void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if ( coeff[9] != 0 ) {
(void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",
-2*coeff[9], coeff[4], -coeff[0]);
(void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",
coeff[9]);
} else
(void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4], coeff[0]);
(void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",
-coeff[1], coeff[0], coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup);
else
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case BilinearReverseDistortion:
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",
coeff[0],(unsigned long) nterms);
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n yy =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n");
for ( i=0; i<5; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n",
coeff[1], coeff[4]);
(void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
-coeff[2], -coeff[3]);
(void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1], coeff[7] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] );
(void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] );
(void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] );
(void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n",
coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ double xc,yc;
/* NOTE: This does the barrel roll in pixel coords not image coords
** The internal distortion must do it in image coordinates,
** so that is what the center coeff (8,9) is given in.
*/
xc = ((double)image->columns-1.0)/2.0 + image->page.x;
yc = ((double)image->rows-1.0)/2.0 + image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n",
coeff[8]-0.5, coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[4],coeff[5],coeff[6],coeff[7]);
(void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/* The user provided a 'scale' expert option will scale the
output image size, by the factor given allowing for super-sampling
of the distorted image space. Any scaling factors must naturally
be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse)
{
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace,exception);
if (distort_image->background_color.alpha_trait != UndefinedPixelTrait)
distort_image->alpha_trait=BlendPixelTrait;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetPixelInfo(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
PixelInfo
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register ssize_t
i;
register Quantum
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid,
exception);
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 0
/*if ( i == 0 && j == 0 )*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using shepards method
so only a point sample interpolatation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelViaPixelInfo(distort_image,&invalid,q);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel,
exception);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelViaPixelInfo(distort_image,&pixel,q);
}
q+=GetPixelChannels(distort_image);
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DistortImage)
#endif
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. X axis. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  double
    residual_angle;

  Image
    *rotate_image,
    *source_clone;

  PointInfo
    shear;

  size_t
    quadrants;

  /*
    Validate arguments, then reduce the requested rotation to a residual
    angle in (-45,45] plus a count of integral 90-degree quadrant turns.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  residual_angle=degrees;
  while (residual_angle < -45.0)
    residual_angle+=360.0;
  for (quadrants=0; residual_angle > 45.0; quadrants++)
    residual_angle-=90.0;
  quadrants%=4;
  /*
    Shear terms are computed only to detect a (near) zero residual angle;
    in that case the rotation is a pure quadrant turn handled exactly by
    IntegralRotateImage().
  */
  shear.x=(-tan((double) DegreesToRadians(residual_angle)/2.0));
  shear.y=sin((double) DegreesToRadians(residual_angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,quadrants,exception));
  /*
    General case: clone the source, select background virtual pixels so the
    exposed corners fill correctly, and delegate the rotation to
    DistortImage() with a scale-rotate-translate distortion (bestfit on).
  */
  source_clone=CloneImage(image,0,0,MagickTrue,exception);
  if (source_clone == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(source_clone,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(source_clone,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  source_clone=DestroyImage(source_clone);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o method: the method to fill in the gradient between the control points.
%
%      The methods used for SparseColor() are often similar to methods
%      used for DistortImage(), and even share the same code for determination
%      of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
const SparseColorMethod method,const size_t number_arguments,
const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"
SparseColorMethod
sparse_method;
double
*coeff;
Image
*sparse_image;
size_t
number_colors;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Determine number of color values needed per control point */
number_colors=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
number_colors++;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
number_colors++;
/*
Convert input arguments into mapping coefficients, this this case
we are mapping (distorting) colors, rather than coordinates.
*/
{ DistortMethod
distort_method;
distort_method=(DistortMethod) method;
if ( distort_method >= SentinelDistortion )
distort_method = ShepardsDistortion; /* Pretend to be Shepards */
coeff = GenerateCoefficients(image, &distort_method, number_arguments,
arguments, number_colors, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Note some Distort Methods may fall back to other simpler methods,
Currently the only fallback of concern is Bilinear to Affine
(Barycentric), which is alaso sparse_colr method. This also ensures
correct two and one color Barycentric handling.
*/
sparse_method = (SparseColorMethod) distort_method;
if ( distort_method == ShepardsDistortion )
sparse_method = method; /* return non-distort methods to normal */
if ( sparse_method == InverseColorInterpolate )
coeff[0]=0.5; /* sqrt() the squared distance for inverse */
}
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
switch (sparse_method) {
case BarycentricColorInterpolate:
{
register ssize_t x=0;
(void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
break;
}
case BilinearColorInterpolate:
{
register ssize_t x=0;
(void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
break;
}
default:
/* sparse color method is too complex for FX emulation */
break;
}
}
/* Generate new image for generated interpolated gradient.
* ASIDE: Actually we could have just replaced the colors of the original
* image, but IM Core policy, is if storage class could change then clone
* the image.
*/
sparse_image=CloneImage(image,0,0,MagickTrue,exception);
if (sparse_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
{ /* if image is ColorMapped - change it to DirectClass */
sparse_image=DestroyImage(sparse_image);
return((Image *) NULL);
}
{ /* ----- MAIN CODE ----- */
CacheView
*sparse_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
j;
status=MagickTrue;
progress=0;
sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
for (j=0; j < (ssize_t) sparse_image->rows; j++)
{
MagickBooleanType
sync;
PixelInfo
pixel; /* pixel to assign to distorted image */
register ssize_t
i;
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(sparse_image,&pixel);
for (i=0; i < (ssize_t) image->columns; i++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (sparse_method)
{
case BarycentricColorInterpolate:
{
register ssize_t x=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
break;
}
case BilinearColorInterpolate:
{
register ssize_t x=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
break;
}
case InverseColorInterpolate:
case ShepardsColorInterpolate:
{ /* Inverse (Squared) Distance weights average (IDW) */
size_t
k;
double
denominator;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=0.0;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=0.0;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=0.0;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=0.0;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=0.0;
denominator = 0.0;
for(k=0; k<number_arguments; k+=2+number_colors) {
register ssize_t x=(ssize_t) k+2;
double weight =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
weight = pow(weight,coeff[0]); /* inverse of power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red += arguments[x++]*weight;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green += arguments[x++]*weight;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue += arguments[x++]*weight;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black += arguments[x++]*weight;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha += arguments[x++]*weight;
denominator += weight;
}
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red/=denominator;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green/=denominator;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue/=denominator;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black/=denominator;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha/=denominator;
break;
}
case ManhattanColorInterpolate:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for(k=0; k<number_arguments; k+=2+number_colors) {
double distance =
fabs((double)i-arguments[ k ])
+ fabs((double)j-arguments[k+1]);
if ( distance < minimum ) {
register ssize_t x=(ssize_t) k+2;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=arguments[x++];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=arguments[x++];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=arguments[x++];
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=arguments[x++];
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=arguments[x++];
minimum = distance;
}
}
break;
}
case VoronoiColorInterpolate:
default:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for (k=0; k<number_arguments; k+=2+number_colors) {
double distance =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
if ( distance < minimum ) {
register ssize_t x=(ssize_t) k+2;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=arguments[x++];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=arguments[x++];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=arguments[x++];
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=arguments[x++];
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=arguments[x++];
minimum = distance;
}
}
break;
}
}
/* set the color directly back into the source image */
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=ClampPixel(QuantumRange*pixel.red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=ClampPixel(QuantumRange*pixel.green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=ClampPixel(QuantumRange*pixel.blue);
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=ClampPixel(QuantumRange*pixel.black);
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=ClampPixel(QuantumRange*pixel.alpha);
SetPixelViaPixelInfo(sparse_image,&pixel,q);
q+=GetPixelChannels(sparse_image);
}
sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SparseColorImage)
#endif
proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sparse_view=DestroyCacheView(sparse_view);
if (status == MagickFalse)
sparse_image=DestroyImage(sparse_image);
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(sparse_image);
}
|
deconvolution_packnto1_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Deconvolution (transposed convolution) over a packn-packed fp16 input blob,
// writing an unpacked (pack1) fp16 output. Per-lane fp16 products are widened
// and accumulated in fp32 via vfwmacc, then reduced to a scalar per output
// element ("fp16s": fp16 storage, fp32 accumulation).
//
// bottom_blob  - input, channels packed packn-wide (fp16)
// top_blob     - output, pack1 (fp16); its w/h/c define the iteration space
// weight_data_fp16 - weights laid out as [outch][channels][maxk][packn]
// bias_data    - optional per-output-channel fp32 bias (may be empty)
static void deconvolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // Lanes of fp16 per vector register: VLEN bytes / 2 bytes per element.
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel span once dilation is applied.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output: one thread per output channel
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Scalar accumulator starts at the bias (if provided);
                // the vector partial sums are folded into it at the end.
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                // Weight block for output channel p.
                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Transposed convolution as a gather: input row sy
                        // contributes to output row i only when
                        // i + y*dilation_h - (kernel_extent_h-1) is a
                        // non-negative multiple of stride_h.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // Same divisibility test along the width.
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
                            // Widening multiply-accumulate: fp16 * fp16 -> fp32.
                            _sum = vfwmacc_vv_f32m2(_sum, _val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

#if C906
                // TODO
                // Workaround path: spill the vector accumulator to memory and
                // reduce with scalar adds (presumably because vfredusum is
                // unavailable/broken on this core -- confirm).
                std::vector<float> ss(packn);
                vse32_v_f32m2((float*)ss.data(), _sum, vl);
                for (int i = 0; i < packn; i++)
                {
                    sum += ss[i];
                }
#else
                // Ordered-free sum reduction of _sum, seeded with the scalar
                // bias-initialized accumulator.
                sum = vfmv_f_s_f32m1_f32(vfredusum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));
#endif

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
static void deconvolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
const int packn = csrr_vlenb() / 2;
const word_type vl = vsetvl_e16m1(packn);
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int maxk = kernel_w * kernel_h;
const __fp16* bias_data_ptr = bias_data_fp16;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__fp16 sum = 0.f;
if (bias_data_ptr)
{
sum = bias_data_ptr[p];
}
vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);
const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;
int k = y * kernel_w + x;
vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
_sum = vfmacc_vv_f16m1(_sum, _val, _w, vl);
}
}
kptr += maxk * packn;
}
sum = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl));
sum = activation_ss(sum, activation_type, activation_params);
outptr[j] = sum;
}
outptr += outw;
}
}
}
|
cfd-mini-c.test.c | /******************************************************************************
* This is a benchmark written to emulate the behavior of a typical
* computational fluid dynamics benchmark. The physical space being simulated
* is broken into boxes (cubes). All boxes have the same number of cells.
*
* build:
* gcc -DOPEN_MP -fopenmp cfd-mini-c.c -o cfd-mini-c
* OR
* gcc cfd-mini-c.c -o cfd-mini-c
*
* usage:
* ./cfd-mini-c -b 2 -c 8 -v 1 // 2 blocks, 8^3 cells each, verify on
* ./cfd-mini-c -b 32 -c 128 // 32 blocks, 128^3 cells each
*
* There are several versions of this benchmark - the original written with
* the use of CHOMBO. The versions here are written in pure C.
* cfd_explain: is meant to be easy to read and understand in
* order to explain the algorithm.
* cfd_*_pa: uses a 1D data allocation with pointer arithmetic
* in order to achieve the same performance as the
* baseline CHOMBO version.
*
* STATUS: The openMP pragmas are not finalized and there is currently a memory
* bug associated with using them.
*
* authors: Catherine Olschanowsky
*
* Copyright (c) 2014, Colorado State University
* All rights reserved.
*******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <string.h>
#include <sys/time.h>
#include <ctype.h>
//#define OPEN_MP
#ifdef OPEN_MP
#include <omp.h>
#endif
typedef double Real;
const int nGhost=2;
const size_t numComp = 5; // Dim(3) + 2
const Real dx = 0.5;
const Real factor1 = 1./12.;
const Real factor2 = 2.;
const Real compMultiplier = 1.;
// these are for testing only (test=1 prints data verify=1 does an internal
// comparison)
int tests = 0;
int verify=0;
int np;
int cfd_mini(int numCell, int numBox, int kernel);
// kernel 0
Real** cfd_explain(int numCell, int numBox,int nGhost);
// kernel 1
Real** cfd_baseline(int numCell, int numBox);
/******************************************************************************
* 1. process command line arguments
* 2. call mini-app function
* 3. exit
*******************************************************************************/
/******************************************************************************
 * 1. process command line arguments
 * 2. call mini-app function
 * 3. exit
 *
 * Options: -t (print data), -v (verify), -k N (kernel), -c N (cells per edge),
 *          -b N (boxes), -p N (thread count, OpenMP builds only).
 * Returns 0 on success, 1 on a bad option; aborts on stray arguments.
 *******************************************************************************/
int main(int argc, char **argv) {
  int c;
  int numCell = 128;   /* cells per edge of each box */
  int numBox = 32;     /* number of independent boxes */
  // by default the explain kernel is used
  int kernel = 0;

#ifdef OPEN_MP
  np = omp_get_num_threads();
#else
  np = 1;
#endif

  // Step 1: process command line arguments
  opterr = 0;
  while ((c = getopt (argc, argv, "tvb:c:k:p:")) != -1){
    switch (c)
    {
      case 't':
        tests = 1;
        break;
      case 'v':
        verify = 1;
        break;
      case 'k':
        kernel = atoi(optarg);
        break;
      case 'c':
        numCell = atoi(optarg);
        break;
      case 'b':
        numBox = atoi(optarg);
        break;
      case 'p':
        np = atoi(optarg);
#ifdef OPEN_MP
        omp_set_num_threads(np);
#endif
        break;
      case '?':
        if (optopt == 'c' || optopt == 'b')
          fprintf (stderr, "Option -%c requires an argument.\n", optopt);
        else if (isprint (optopt))
          fprintf (stderr, "Unknown option `-%c'.\n", optopt);
        else
          fprintf (stderr,
                   "Unknown option character `\\x%x'.\n",
                   optopt);
        return 1;
      default:
        abort ();
    }
  }
  if(optind < argc){
    fprintf(stderr,"Command Line Parsing Terminated Prematurely.\n");
    fprintf(stderr,"Check that extra arguments were not passed in.\n");
    abort();
  }

  // print out the parameters
  printf("nCell=%d nBox=%d\n",numCell,numBox);
  if(tests)
    printf("Resulting Data\n");

  // Step 2: Call the function to do the work
  cfd_mini(numCell, numBox, kernel);

  /* BUG FIX: previously returned 1 (failure) on the success path; a shell
   * or test harness checking the exit status would treat every run as an
   * error. Return 0 to follow the standard success convention. */
  return 0;
}
#undef GET_VAL_PTR
#define GET_VAL_PTR(b,c,z,y,x) (b)+\
(c)*full_numCell3+\
((z)+nGhost)*full_numCell2+\
((y)+nGhost)*full_numCell+\
((x)+nGhost)
#define GET_FACE_VAL_PTR(d,b,c,z,y,x) (b)+\
(c)*(numCell+((d)==2))*(numCell+((d)==1))*(numCell+((d)==0)) +\
(z)*(numCell+((d)==1))*(numCell+((d)==0))+\
(y)*(numCell+((d)==0))+\
(x)
/******************************************************************************
* cfd_mini
* Input
* -----
* numCell - the number of cells in a single dimension of a single box
* numBoxes - the number of independent boxes to process
*******************************************************************************/
/******************************************************************************
 * cfd_mini
 * Runs the selected kernel and, when requested, verifies its output against
 * the reference cfd_explain implementation cell by cell (component 0 only,
 * absolute tolerance 0.001).
 *
 * Input
 * -----
 * numCell - the number of cells in a single dimension of a single box
 * numBox  - the number of independent boxes to process
 * kernel  - 0 = reference (cfd_explain), 1 = cfd_baseline
 *
 * Returns 0 on success (or unverified error), 1 on verification failure.
 * NOTE: the box arrays returned by the kernels are intentionally not freed;
 * this is a run-once benchmark and the process exits immediately afterwards.
 *******************************************************************************/
int cfd_mini(int numCell, int numBox, int kernel){
  Real **verify_data_base = NULL;
  /* BUG FIX: verify_me was previously uninitialized; with -v and an
   * unrecognized kernel (e.g. -k 2) it was dereferenced without ever being
   * assigned -- undefined behavior. Initialize to NULL and guard below. */
  Real **verify_me = NULL;

  if(verify && !kernel){
    fprintf(stderr,"You must specify a kernel to verify\n");
    return 0;
  }
  if(verify || !kernel){
    verify_data_base = cfd_explain(numCell,numBox,nGhost);
  }

  switch (kernel)
  {
    case 1:
      verify_me = cfd_baseline(numCell, numBox);
      break;
    default:
      /* kernel 0 already ran above; other values have no implementation */
      break;
  }

  if(verify){
    if(verify_me == NULL){
      fprintf(stderr,"No output produced for kernel %d; cannot verify\n",kernel);
      return 1;
    }
    /* Strides used by GET_VAL_PTR; only component 0 is compared, so the
     * component stride (full_numCell3) is multiplied by zero here. It is
     * computed as the full cube for correctness regardless. */
    int full_numCell = numCell+2*nGhost;
    int full_numCell2 = full_numCell*full_numCell;
    int full_numCell3 = full_numCell*full_numCell*full_numCell;
    Real e = 0.001;  /* absolute comparison tolerance */
    int idx,iz,iy,ix;
    for(idx=0;idx < numBox;idx++){
      Real* truth = verify_data_base[idx];
      Real* hope = verify_me[idx];
      for(iz=0;iz<numCell;iz++){
        for(iy=0;iy<numCell;iy++){
          for(ix=0;ix<numCell;ix++){
            Real t = *GET_VAL_PTR(truth,0,iz,iy,ix);
            Real v = *GET_VAL_PTR(hope,0,iz,iy,ix);
            if(v > t+e || v < t-e){
              fprintf(stderr,"VERIFICATION FAILURE!!!\n");
              return 1;
            }
          }
        }
      }
    }
    fprintf(stderr,"Code is verified\n");
  }
  return 0;
}
/******************************************************************************
* defines for cfd_basic code -- this makes for easier reading and
* debugging
******************************************************************************/
#define p_DATA_old(z,y,x) *(GET_VAL_PTR(old_box,0,z,y,x))
#define e_DATA_old(z,y,x) *(GET_VAL_PTR(old_box,1,z,y,x))
#define u_DATA_old(z,y,x) *(GET_VAL_PTR(old_box,2,z,y,x))
#define v_DATA_old(z,y,x) *(GET_VAL_PTR(old_box,3,z,y,x))
#define w_DATA_old(z,y,x) *(GET_VAL_PTR(old_box,4,z,y,x))
#define p_DATA_new(z,y,x) *(GET_VAL_PTR(new_box,0,z,y,x))
#define e_DATA_new(z,y,x) *(GET_VAL_PTR(new_box,1,z,y,x))
#define u_DATA_new(z,y,x) *(GET_VAL_PTR(new_box,2,z,y,x))
#define v_DATA_new(z,y,x) *(GET_VAL_PTR(new_box,3,z,y,x))
#define w_DATA_new(z,y,x) *(GET_VAL_PTR(new_box,4,z,y,x))
#define p_CACHE_x(z,y,x) *(GET_FACE_VAL_PTR(0,g_cache,0,z,y,x))
#define e_CACHE_x(z,y,x) *(GET_FACE_VAL_PTR(0,g_cache,1,z,y,x))
#define u_CACHE_x(z,y,x) *(GET_FACE_VAL_PTR(0,g_cache,2,z,y,x))
#define v_CACHE_x(z,y,x) *(GET_FACE_VAL_PTR(0,g_cache,3,z,y,x))
#define w_CACHE_x(z,y,x) *(GET_FACE_VAL_PTR(0,g_cache,4,z,y,x))
#define p_CACHE_y(z,y,x) *(GET_FACE_VAL_PTR(1,g_cache,0,z,y,x))
#define e_CACHE_y(z,y,x) *(GET_FACE_VAL_PTR(1,g_cache,1,z,y,x))
#define u_CACHE_y(z,y,x) *(GET_FACE_VAL_PTR(1,g_cache,2,z,y,x))
#define v_CACHE_y(z,y,x) *(GET_FACE_VAL_PTR(1,g_cache,3,z,y,x))
#define w_CACHE_y(z,y,x) *(GET_FACE_VAL_PTR(1,g_cache,4,z,y,x))
#define p_CACHE_z(z,y,x) *(GET_FACE_VAL_PTR(2,g_cache,0,z,y,x))
#define e_CACHE_z(z,y,x) *(GET_FACE_VAL_PTR(2,g_cache,1,z,y,x))
#define u_CACHE_z(z,y,x) *(GET_FACE_VAL_PTR(2,g_cache,2,z,y,x))
#define v_CACHE_z(z,y,x) *(GET_FACE_VAL_PTR(2,g_cache,3,z,y,x))
#define w_CACHE_z(z,y,x) *(GET_FACE_VAL_PTR(2,g_cache,4,z,y,x))
/*******************************************************************************
* The following function is meant to be explanatory code. This implementation
* is meant purely to explain the control flow and algorithm. Other
* implementations include optimizations that sometimes obfuscate the
* algorithm.
*
* Processing the boxes means that we will be reading the data from
* old_boxes and writing them to new_boxes
* The following are the equations for this calculation
*
* There are 5 components: p, e, u, v, w (density, energy, velocity (3D))
* Each of these components is represented as a 3D array (initialized
* above).
* p_{t+1}(z,y,x) = h(p_t,z,y,x) + h'(p_t,z,y,x) + h"(p_t,z,y,x)
* e_{t+1}(z,y,x) = h(e_t,z,y,x) + h'(e_t,z,y,x) + h"(e_t,z,y,x)
* u_{t+1}(z,y,x) = h(u_t,z,y,x) + h'(u_t,z,y,x) + h"(u_t,z,y,x)
* v_{t+1}(z,y,x) = h(v_t,z,y,x) + h'(v_t,z,y,x) + h"(v_t,z,y,x)
* w_{t+1}(z,y,x) = h(w_t,z,y,x) + h'(w_t,z,y,x) + h"(w_t,z,y,x)
*
* Computing face-centered, flux values based on cell-centered values
* g(component,z,y,x) is a stencil operation that looks like the following:
* g(c,z,y,x) = factor1*
* (c[z][y][x-2]+7*(c[z][y][x-1]+c[z][y][x])+c[z][y][x+1])
* similarly for g' and g"
* g'(c,z,y,x) = factor1*
* (c[z][y-2][x]+7*(c[z][y-1][x]+c[z][y][x])+c[z][y+1][x])
* g"(c,z,y,x) = factor1*
* (c[z-2][y][x]+7*(c[z-1][y][x]+c[z][y][x])+c[z+1][y][x])
*
* Computing cell-centered values based on face-centered flux values
* h(component,z,y,x) is a stencil operation that looks like the following:
* h(c,z,y,x) = factor2*
* (g(c,z,y,x+1)*g(u_t,z,y,x+1)-g(c,z,y,x)*g(u_t,z,y,x))
* h'(c,z,y,x) = factor2*
* (g'(c,z,y+1,x)*g'(v_t,z,y+1,x)-g'(c,z,y,x)*g'(v_t,z,y,x))
* h"(c,z,y,x) = factor2*
* (g"(c,z+1,y,x)*g"(w_t,z+1,y,x)-g"(c,z,y,x)*g"(w_t,z,y,x))
*
* in this example code we omit some space and time saving optimizations
* in order to make the code easy to learn. FIXME: are these correct?
* Step 1 is to calculate all of the g() values
* Step 2 multiplies the values together for the first column in the
* equations above
* Step 3 Return to Step 1 for g' and then for g"
*
* The following is a table describing how the notation above
* maps to the storage in code that we are using below
* value name | variable name
* ----------------------------------
* p_{t+1} | new_data[0]
* e_{t+1} | new_data[1]
* u_{t+1} | new_data[2]
* v_{t+1} | new_data[3]
* w_{t+1} | new_data[4]
* g(p_t) | g_cache[0]
* g(e_t) | g_cache[1]
* ... same pattern for u,v,w
* g'(p_t) | g_cache[0]
* ... continue pattern
* g"(p_t) | g_cache[0]
*
* g_cache can be reused because we accumulate into new_data between
* iterations
****************************************************************************/
/* Reference implementation: one pass of the flux stencil in x, y, then z,
 * accumulating into new_boxes. Allocates and initializes its own boxes and
 * returns the array of updated boxes (caller owns it; old_boxes and g_cache
 * are leaked -- acceptable for this run-once benchmark). */
Real** cfd_explain(int numCell, int numBox,int nGhost){
  double time_spent;        /* unused; retained as-is */
  struct timeval tv1, tv2;  /* wall-clock timing of the processing loop */

  // The size of the 3D data is (numCell+2*nGhost)^3
  int full_numCell = numCell+2*nGhost;
  int full_numCell2 = full_numCell*full_numCell;
  /* NOTE(review): this is full_numCell^2 + full_numCell, NOT the cube.
   * GET_VAL_PTR uses it as the per-component stride, so the 5 components
   * alias each other inside the totalCells*numComp allocation. cfd_baseline
   * computes the same value, so cross-kernel verification still agrees, but
   * this looks like the "memory bug" mentioned in the file header -- confirm
   * before changing (both kernels must change together). */
  int full_numCell3 = full_numCell*full_numCell+full_numCell;
  int totalCells = full_numCell*full_numCell*full_numCell;
  int flux_totalSize = numCell*numCell*(numCell+1);  /* unused here */

  // in order to access each box we need an array of pointers
  Real **old_boxes = malloc(sizeof(Real*)*numBox);
  Real **new_boxes = malloc(sizeof(Real*)*numBox);

  // Allocate the 1D array of space (which is indexed as a 3D space)
  // numComp is the number of components (think of 5 cubes of data)
  int idx;
  for(idx=0;idx<numBox;idx++){
    old_boxes[idx] = malloc(sizeof(Real)*totalCells*numComp);
    new_boxes[idx] = malloc(sizeof(Real)*totalCells*numComp);
  }

  // initialize the data within each box
  int iz,iy,ix;
  for(idx=0;idx < numBox;idx++){
    Real* old_box = old_boxes[idx];
    Real* new_box = new_boxes[idx];
    // z,y,x traversal of all cells (including ghosts)
    // the initial value is the manhattan distance from the cell coordinates
    // to the origin of the non-ghost data multiplied
    // by dx and added to a different constant per variable
    for(iz=-nGhost;iz<(full_numCell-nGhost);iz++){
      for(iy=-nGhost;iy<(full_numCell-nGhost);iy++){
        for(ix=-nGhost;ix<(full_numCell-nGhost);ix++){
          /* components: p=density, e=energy, u/v/w=velocity; each gets a
           * distinct additive constant (0..4) on top of dx*(iz+iy+ix) */
          p_DATA_new(iz,iy,ix) = dx*(iz+iy+ix);
          e_DATA_new(iz,iy,ix) = 1.+dx*(iz+iy+ix);
          u_DATA_new(iz,iy,ix) = 2.+dx*(iz+iy+ix);
          v_DATA_new(iz,iy,ix) = 3.+dx*(iz+iy+ix);
          w_DATA_new(iz,iy,ix) = 4.+dx*(iz+iy+ix);
          p_DATA_old(iz,iy,ix) = dx*(iz+iy+ix);
          e_DATA_old(iz,iy,ix) = 1.+dx*(iz+iy+ix);
          u_DATA_old(iz,iy,ix) = 2.+dx*(iz+iy+ix);
          v_DATA_old(iz,iy,ix) = 3.+dx*(iz+iy+ix);
          w_DATA_old(iz,iy,ix) = 4.+dx*(iz+iy+ix);
        }
      }
    }
  }
  // end allocate and init

  // begin process boxes
  // allocate the g cache -- to save all of the g() calculations
  // There is one value per face in the box. (numCell+1) is the
  // number of faces in each direction.
  Real* g_cache = malloc(sizeof(Real)*numCell*numCell*(numCell+1)*numComp);
  //ALLOCATE_CACHES(numCell,numComp);

  // DATA Usage:
  // in this code we are reusing the g_cache to store 2 values per variable
  // (component).
  // Additionally we are accumulating the result into [p,e,u,v,w]_DATA

  // iterate over all of the boxes
  gettimeofday(&tv1, NULL);
  for(idx=0;idx < numBox;idx++){
    Real* old_box = old_boxes[idx];
    Real* new_box = new_boxes[idx];

    //---------------------- x-direction
    // Iterate over faces and calculate g(): a 4-point stencil along x
    // (coefficients 1,7,7,1 scaled by factor1) evaluated at each x-face.
    for(iz=0;iz<numCell;iz++){
      for(iy=0;iy<numCell;iy++){
        for(ix=0;ix<numCell+1;ix++){
          p_CACHE_x(iz,iy,ix) = factor1*
            (p_DATA_old(iz,iy,ix-2)+
             7*(p_DATA_old(iz,iy,ix-1)+p_DATA_old(iz,iy,ix)) +
             p_DATA_old(iz,iy,ix+1));
          e_CACHE_x(iz,iy,ix) = factor1*
            (e_DATA_old(iz,iy,ix-2)+
             7*(e_DATA_old(iz,iy,ix-1)+e_DATA_old(iz,iy,ix)) +
             e_DATA_old(iz,iy,ix+1));
          u_CACHE_x(iz,iy,ix) = factor1*
            (u_DATA_old(iz,iy,ix-2)+
             7*(u_DATA_old(iz,iy,ix-1)+u_DATA_old(iz,iy,ix)) +
             u_DATA_old(iz,iy,ix+1));
          v_CACHE_x(iz,iy,ix) = factor1*
            (v_DATA_old(iz,iy,ix-2)+
             7*(v_DATA_old(iz,iy,ix-1)+v_DATA_old(iz,iy,ix)) +
             v_DATA_old(iz,iy,ix+1));
          w_CACHE_x(iz,iy,ix) = factor1*
            (w_DATA_old(iz,iy,ix-2)+
             7*(w_DATA_old(iz,iy,ix-1)+w_DATA_old(iz,iy,ix)) +
             w_DATA_old(iz,iy,ix+1));
        }
      }
    }

    // compute part of h() and reuse space for g()
    // u_CACHE_x(iz,iy,ix) must be written to last for reuse to work.
    for(iz=0;iz<numCell;iz++){
      for(iy=0;iy<numCell;iy++){
        for(ix=0;ix<numCell+1;ix++){
          p_CACHE_x(iz,iy,ix) *= factor2*u_CACHE_x(iz,iy,ix);
          e_CACHE_x(iz,iy,ix) *= factor2*u_CACHE_x(iz,iy,ix);
          v_CACHE_x(iz,iy,ix) *= factor2*u_CACHE_x(iz,iy,ix);
          w_CACHE_x(iz,iy,ix) *= factor2*u_CACHE_x(iz,iy,ix);
          u_CACHE_x(iz,iy,ix) *= factor2*u_CACHE_x(iz,iy,ix);
        }
      }
    }

    // finish h()
    // iterate over cells
    // and save the difference of the two adjacent faces into the cell data
    for(iz=0;iz<numCell;iz++){
      for(iy=0;iy<numCell;iy++){
        for(ix=0;ix<numCell;ix++){
          p_DATA_new(iz,iy,ix)+= p_CACHE_x(iz,iy,ix+1)-p_CACHE_x(iz,iy,ix);
          e_DATA_new(iz,iy,ix)+= e_CACHE_x(iz,iy,ix+1)-e_CACHE_x(iz,iy,ix);
          u_DATA_new(iz,iy,ix)+= u_CACHE_x(iz,iy,ix+1)-u_CACHE_x(iz,iy,ix);
          v_DATA_new(iz,iy,ix)+= v_CACHE_x(iz,iy,ix+1)-v_CACHE_x(iz,iy,ix);
          w_DATA_new(iz,iy,ix)+= w_CACHE_x(iz,iy,ix+1)-w_CACHE_x(iz,iy,ix);
        }
      }
    }

    //---------------------- y-direction
    // Iterate over faces and calculate g'(): same stencil, offsets along y.
    for(iz=0;iz<numCell;iz++){
      for(iy=0;iy<numCell+1;iy++){
        for(ix=0;ix<numCell;ix++){
          p_CACHE_y(iz,iy,ix) = factor1*
            (p_DATA_old(iz,iy-2,ix)+
             7*(p_DATA_old(iz,iy-1,ix)+p_DATA_old(iz,iy,ix)) +
             p_DATA_old(iz,iy+1,ix));
          e_CACHE_y(iz,iy,ix) = factor1*
            (e_DATA_old(iz,iy-2,ix)+
             7*(e_DATA_old(iz,iy-1,ix)+e_DATA_old(iz,iy,ix)) +
             e_DATA_old(iz,iy+1,ix));
          u_CACHE_y(iz,iy,ix) = factor1*
            (u_DATA_old(iz,iy-2,ix)+
             7*(u_DATA_old(iz,iy-1,ix)+u_DATA_old(iz,iy,ix)) +
             u_DATA_old(iz,iy+1,ix));
          v_CACHE_y(iz,iy,ix) = factor1*
            (v_DATA_old(iz,iy-2,ix)+
             7*(v_DATA_old(iz,iy-1,ix)+v_DATA_old(iz,iy,ix)) +
             v_DATA_old(iz,iy+1,ix));
          w_CACHE_y(iz,iy,ix) = factor1*
            (w_DATA_old(iz,iy-2,ix)+
             7*(w_DATA_old(iz,iy-1,ix)+w_DATA_old(iz,iy,ix)) +
             w_DATA_old(iz,iy+1,ix));
        }
      }
    }

    // compute part of h'() and reuse space for g'()
    // v_CACHE_x(iz,iy,ix) must be written to last for reuse to work.
    for(iz=0;iz<numCell;iz++){
      for(iy=0;iy<numCell+1;iy++){
        for(ix=0;ix<numCell;ix++){
          p_CACHE_y(iz,iy,ix) = factor2*p_CACHE_y(iz,iy,ix)*v_CACHE_y(iz,iy,ix);
          e_CACHE_y(iz,iy,ix) = factor2*e_CACHE_y(iz,iy,ix)*v_CACHE_y(iz,iy,ix);
          u_CACHE_y(iz,iy,ix) = factor2*u_CACHE_y(iz,iy,ix)*v_CACHE_y(iz,iy,ix);
          w_CACHE_y(iz,iy,ix) = factor2*w_CACHE_y(iz,iy,ix)*v_CACHE_y(iz,iy,ix);
          v_CACHE_y(iz,iy,ix) = factor2*v_CACHE_y(iz,iy,ix)*v_CACHE_y(iz,iy,ix);
        }
      }
    }

    // finish h'()
    // iterate over cells
    // and save the difference of the two adjacent faces into the cell data
    for(iz=0;iz<numCell;iz++){
      for(iy=0;iy<numCell;iy++){
        for(ix=0;ix<numCell;ix++){
          p_DATA_new(iz,iy,ix)+= p_CACHE_y(iz,iy+1,ix)-p_CACHE_y(iz,iy,ix);
          e_DATA_new(iz,iy,ix)+= e_CACHE_y(iz,iy+1,ix)-e_CACHE_y(iz,iy,ix);
          u_DATA_new(iz,iy,ix)+= u_CACHE_y(iz,iy+1,ix)-u_CACHE_y(iz,iy,ix);
          v_DATA_new(iz,iy,ix)+= v_CACHE_y(iz,iy+1,ix)-v_CACHE_y(iz,iy,ix);
          w_DATA_new(iz,iy,ix)+= w_CACHE_y(iz,iy+1,ix)-w_CACHE_y(iz,iy,ix);
        }
      }
    }

    //---------------------- z-direction
    // Iterate over faces and calculate g"(): same stencil, offsets along z.
    for(iz=0;iz<numCell+1;iz++){
      for(iy=0;iy<numCell;iy++){
        for(ix=0;ix<numCell;ix++){
          p_CACHE_z(iz,iy,ix) = factor1*
            (p_DATA_old(iz-2,iy,ix)+
             7*(p_DATA_old(iz-1,iy,ix)+p_DATA_old(iz,iy,ix)) +
             p_DATA_old(iz+1,iy,ix));
          e_CACHE_z(iz,iy,ix) = factor1*
            (e_DATA_old(iz-2,iy,ix)+
             7*(e_DATA_old(iz-1,iy,ix)+e_DATA_old(iz,iy,ix)) +
             e_DATA_old(iz+1,iy,ix));
          u_CACHE_z(iz,iy,ix) = factor1*
            (u_DATA_old(iz-2,iy,ix)+
             7*(u_DATA_old(iz-1,iy,ix)+u_DATA_old(iz,iy,ix)) +
             u_DATA_old(iz+1,iy,ix));
          v_CACHE_z(iz,iy,ix) = factor1*
            (v_DATA_old(iz-2,iy,ix)+
             7*(v_DATA_old(iz-1,iy,ix)+v_DATA_old(iz,iy,ix)) +
             v_DATA_old(iz+1,iy,ix));
          w_CACHE_z(iz,iy,ix) = factor1*
            (w_DATA_old(iz-2,iy,ix)+
             7*(w_DATA_old(iz-1,iy,ix)+w_DATA_old(iz,iy,ix)) +
             w_DATA_old(iz+1,iy,ix));
        }
      }
    }

    // compute part of h"() and reuse space for g"()
    // w_CACHE_x(iz,iy,ix) must be written to last for reuse to work.
    for(iz=0;iz<numCell+1;iz++){
      for(iy=0;iy<numCell;iy++){
        for(ix=0;ix<numCell;ix++){
          p_CACHE_z(iz,iy,ix) = factor2*p_CACHE_z(iz,iy,ix)*w_CACHE_z(iz,iy,ix);
          e_CACHE_z(iz,iy,ix) = factor2*e_CACHE_z(iz,iy,ix)*w_CACHE_z(iz,iy,ix);
          u_CACHE_z(iz,iy,ix) = factor2*u_CACHE_z(iz,iy,ix)*w_CACHE_z(iz,iy,ix);
          v_CACHE_z(iz,iy,ix) = factor2*v_CACHE_z(iz,iy,ix)*w_CACHE_z(iz,iy,ix);
          w_CACHE_z(iz,iy,ix) = factor2*w_CACHE_z(iz,iy,ix)*w_CACHE_z(iz,iy,ix);
        }
      }
    }

    // finish h"()
    // iterate over cells
    // and save the difference of the two adjacent faces into the cell data
    for(iz=0;iz<numCell;iz++){
      for(iy=0;iy<numCell;iy++){
        for(ix=0;ix<numCell;ix++){
          p_DATA_new(iz,iy,ix)+= p_CACHE_z(iz+1,iy,ix)-p_CACHE_z(iz,iy,ix);
          e_DATA_new(iz,iy,ix)+= e_CACHE_z(iz+1,iy,ix)-e_CACHE_z(iz,iy,ix);
          u_DATA_new(iz,iy,ix)+= u_CACHE_z(iz+1,iy,ix)-u_CACHE_z(iz,iy,ix);
          v_DATA_new(iz,iy,ix)+= v_CACHE_z(iz+1,iy,ix)-v_CACHE_z(iz,iy,ix);
          w_DATA_new(iz,iy,ix)+= w_CACHE_z(iz+1,iy,ix)-w_CACHE_z(iz,iy,ix);
        }
      }
    }

    // optional dump of the density component for the -t option
    if(tests){
      //printf("\nResult\n");
      int iz,iy,ix;
      //printf("\nSTART BOX\n");
      for(iz=0;iz<numCell;iz++){
        printf("\n");
        for(iy=0;iy<numCell;iy++){
          printf("\n");
          for(ix=0;ix<numCell;ix++){
            printf("%.3lf,",p_DATA_new(iz,iy,ix));
          }
        }
      }
      //printf("\n");
    }
  }
  gettimeofday(&tv2, NULL);

  if(!tests)
    printf ("LoopTime: %f\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
            (double) (tv2.tv_sec - tv1.tv_sec));
  else
    printf("\n");

  return new_boxes;
}
/******************************************************************************
* allocate_and_init
* Input
* -----
* old_b pointer to old data
* new_b pointer to new data
* numCell the number of cells in a single direction
* numBox the number of boxes
* nGhost the number of ghost cells
*******************************************************************************/
Real** cfd_baseline(const int numCell,const int numBox){
/* Baseline (non-blocked) CFD kernel.
 * Allocates numBox boxes, each holding numComp components of
 * (numCell+2*nGhost)^3 cells, initializes old and new boxes identically,
 * then for each box computes face fluxes in the three directions and
 * accumulates the flux differences into the new box.  The box loop is timed
 * and the elapsed time printed.
 * Returns: new_boxes (ownership transfers to the caller) when the file-scope
 * 'verify' flag is set; otherwise frees all allocations and returns NULL.
 * NOTE(review): when 'verify' is set, old_boxes and its per-box buffers are
 * intentionally(?) not freed -- confirm the verifier releases them.
 * Depends on file-scope: nGhost, numComp, compMultiplier, dx, factor1,
 * factor2, tests, verify, and the GET_VAL_PTR / GET_FACE_VAL_PTR macros. */
// allocate and initialize the data
Real** old_boxes = malloc(sizeof(Real*)*numBox); // unchecked malloc
Real** new_boxes = malloc(sizeof(Real*)*numBox); // unchecked malloc
// allocate the space for each box
// this should be a contiguous area of memory numCell cubed in size
// they need to hold real values
int full_numCell = numCell+2*nGhost;
int full_numCell2 = full_numCell*full_numCell;
// NOTE(review): the "3" suffix suggests full_numCell^3, but this computes
// full_numCell^2 + full_numCell; it is not referenced by name below (unless
// a macro such as GET_VAL_PTR captures it) -- confirm intended.
int full_numCell3 = full_numCell*full_numCell+full_numCell;
int totalCells = (numCell+2*nGhost)*(numCell+2*nGhost)*(numCell+2*nGhost);
// face-flux buffer size for one component (numCell+1 faces along one axis)
int flux_totalSize = numCell*numCell*(numCell+1);
// the number of components
int idx;
for(idx=0;idx<numBox;idx++){
old_boxes[idx] = malloc(sizeof(Real)*totalCells*numComp); // unchecked malloc
new_boxes[idx] = malloc(sizeof(Real)*totalCells*numComp); // unchecked malloc
}
// initialize the data within each box
#ifdef OPEN_MP
#pragma omp parallel for default(shared) private(idx)
#endif
for(idx=0;idx < numBox;idx++){
Real* old_box = old_boxes[idx];
Real* new_box = new_boxes[idx];
int ic,iz,iy,ix;
for(ic=0;ic<numComp;ic++){
// per-component offset so each variable starts from a different constant
const Real phiOrigin = ic*compMultiplier;
// z,y,x traversal of all cells (including ghosts)
// the initial value is the manhattan distance from the cell coordinates
// to the origin of the non-ghost data multiplied
// by dx and added to a different constant per variable
for(iz=-nGhost;iz<(full_numCell-nGhost);iz++){
for(iy=-nGhost;iy<(full_numCell-nGhost);iy++){
// pointer arithmetic
Real* oldp = GET_VAL_PTR(old_box,ic,iz,iy,-nGhost);
Real* newp = GET_VAL_PTR(new_box,ic,iz,iy,-nGhost);
// inner loop with pointer increment
for(ix=-nGhost;ix<(full_numCell-nGhost);ix++){
*oldp = phiOrigin + dx*((iz)+(iy)+(ix));
*newp = phiOrigin + dx*((iz)+(iy)+(ix));
oldp++;
newp++;
}
}
}
}
}
// process each of the boxes
double time_spent; // NOTE(review): never assigned or read -- dead variable
struct timeval tv1, tv2;
// loop bounds
// cells per component (including ghosts); may be referenced by macros
int phi_comp_mult = ((numCell+2*nGhost)*(numCell+2*nGhost)
*(numCell+2*nGhost));
const int phi_pencil_size = (numCell+2*nGhost);
const int flux_comp_mult = ((numCell)*(numCell)*(numCell+1));
// process each of the boxes one at a time
//
#ifdef OPEN_MP
printf("Num Threads: %d\n",omp_get_max_threads());
#endif
gettimeofday(&tv1, NULL);
#ifdef OPEN_MP
#pragma omp parallel for default(shared) private(idx)
#endif
for(idx=0;idx < numBox;idx++){
Real* old_box = old_boxes[idx];
Real* new_box = new_boxes[idx];
// f_xu/f_yu/f_zu: face-loop upper bounds for the current direction
int f_xu,f_yu,f_zu;
int flux_pencil_x;
int flux_pencil_y;
int iDir,ic,iz,iy,ix;
// phiOffset*: cell strides along the current direction (1 / pencil / plane)
// fluxOffset1: face stride along the current direction
int phiOffset1,phiOffset2,fluxOffset1;
// the flux cache
Real* fluxCache = malloc(sizeof(Real)*numCell*numCell*(numCell+1)*numComp); // unchecked malloc
// Allocate the space for the velocity cache
// This is only a single component
Real* velCache = malloc(sizeof(Real)*numCell*numCell*(numCell+1)); // unchecked malloc
// compute the fluxes on the faces in each direction
for(iDir=0;iDir<3;iDir++){
// x-direction
if(iDir == 0){
f_zu = numCell;
f_yu = numCell;
f_xu = numCell+1;
flux_pencil_x = numCell+1;
flux_pencil_y = numCell;
phiOffset1 = 1;
phiOffset2 = 2;
fluxOffset1 = 1;
}else if(iDir == 1){
f_zu = numCell;
f_yu = numCell+1;
f_xu = numCell;
flux_pencil_x = numCell;
flux_pencil_y = numCell+1;
phiOffset1 = phi_pencil_size;
phiOffset2 = phi_pencil_size*2;
fluxOffset1 = numCell;
}else if(iDir == 2){
f_zu = numCell+1;
f_yu = numCell;
f_xu = numCell;
flux_pencil_x = numCell;
flux_pencil_y = numCell;
phiOffset1 = phi_pencil_size*phi_pencil_size;
phiOffset2 = phi_pencil_size*phi_pencil_size*2;
fluxOffset1 = numCell*numCell;
}
// the upper bounds are determined by direction info above
for(ic=0;ic<numComp;ic++){
// Iterate over faces and calculate g()
for(iz=0;iz<f_zu;iz++){
for(iy=0;iy<f_yu;iy++){
Real* phip = GET_VAL_PTR(old_box,ic,iz,iy,0);
Real* fluxp = GET_FACE_VAL_PTR(iDir,fluxCache,ic,iz,iy,0);
for(ix=0;ix<f_xu;ix++){
// weighted stencil of the 4 cells straddling the face:
// factor1 * (phi[-2] + 7*(phi[-1] + phi[0]) + phi[+1])
*fluxp = factor1*
((*(phip - phiOffset2)) +
7*((*(phip - phiOffset1)) + (*(phip))) +
(*(phip + phiOffset1)));
++phip;
++fluxp;
}
}
}
}
// cache the velocity component for the next half of the calculation
// NOTE(review): copies flux component index (iDir+2); assumes numComp > 4
// and that component (iDir+2) holds the advecting velocity -- confirm.
memcpy(velCache,
(fluxCache+(iDir+2)*((numCell+1)*numCell*numCell)),
sizeof(Real)*numCell*numCell*(numCell+1));
for(ic=0;ic<numComp;ic++){
for(iz=0;iz<f_zu;iz++){
for(iy=0;iy<f_yu;iy++){
//pointer arithmetic
Real* velp = velCache + iz*flux_pencil_y*flux_pencil_x+
iy*flux_pencil_x;
Real* fluxp = GET_FACE_VAL_PTR(iDir,fluxCache,ic,iz,iy,0);
// inner loop: scale each face flux by factor2 * cached velocity
for(ix=0;ix<f_xu;ix++){
*fluxp *= factor2*(*velp);
++fluxp;
++velp;
}
}
}
}
// compute the second half of the flux calculation
// accumulate the differences into the new data box
for(ic=0;ic<numComp;ic++){
for(iz=0;iz<numCell;iz++){
for(iy=0;iy<numCell;iy++){
// pointer arithmetic
Real* phip = GET_VAL_PTR(new_box,ic,iz,iy,0);
Real* fluxp = GET_FACE_VAL_PTR(iDir,fluxCache,ic,iz,iy,0);
for(ix=0;ix<numCell;ix++){
// divergence contribution: flux(face+1) - flux(face)
*phip += (*(fluxp + fluxOffset1)) - (*fluxp);
++phip;
++fluxp;
}
}
}
}
} // direction loop
free(fluxCache);
free(velCache);
} // box loop
gettimeofday(&tv2, NULL);
printf ("LoopTime: %f\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
if(tests){
// dump component 0 of box 0 for visual comparison
printf("\nEnd Result\n");
int ic=0,iz,iy,ix;
printf("\nSTART BOX\n");
for(iz=0;iz<numCell;iz++){
printf("\n");
for(iy=0;iy<numCell;iy++){
printf("\n");
Real* phip = GET_VAL_PTR(new_boxes[0],ic,iz,iy,0);
for(ix=0;ix<numCell;ix++){
printf("%lf,",*phip);
++phip;
}
}
}
printf("\nEND BOX");
}
// cleanup the memory used -- unless we are doing verification
if(verify){ return new_boxes; }
for(idx=0;idx<numBox;idx++){
free(old_boxes[idx]);
free(new_boxes[idx]);
}
free(old_boxes);
free(new_boxes);
return NULL;
}
|
vednnConvolutionForward.c | #include "vednnConvolutionForward.h"
#include "vednn-def.h"
#include <stdint.h>
#include <assert.h>
#include <stdio.h>
#ifdef __cplusplus
extern "C" { //}
#endif
/** Wrapper that parallelizes a convolution-forward implementation over the
 *  minibatch dimension.
 *
 *  Without OpenMP (or for batch==1 / a single vednn thread) it simply
 *  forwards to \p pFunc.  Otherwise each thread handles a contiguous slice
 *  of the batch: thread i processes [batchBegin, batchBegin+myBatch) using
 *  per-thread copies of the tensor params whose .batch is shrunk to myBatch,
 *  with data pointers offset accordingly.
 *
 *  The team size is clamped to the batch so every thread gets myBatch > 0;
 *  this matters because some pFunc impls contain OpenMP barriers that every
 *  team member must reach (a thread that skipped the call would hang the
 *  team).
 *
 *  \return VEDNN_SUCCESS, or the OR of the per-thread error codes
 *          (collapsed to 1 when the OR yields 3, which is not a valid enum).
 */
inline vednnError_t
vednnConvolutionForward_mb_threads( vednnConvForward_t pFunc,
    VEDNN_CONVFWD_ARGS )
{
#ifndef VEDNN_USE_OPENMP
  return pFunc(VEDNN_CONVFWD_ARGS_LIST);
#else
  int64_t allBatch = pParamIn->batch; // check as in vednnx
  if (allBatch == 1 || __vednn_omp_num_threads == 1) {
    return pFunc(VEDNN_CONVFWD_ARGS_LIST);
  }else{
    // plain int (not the enum) so the bitwise-OR accumulation is well-defined
    int rc = VEDNN_SUCCESS ;
    // Clamp the team size so every thread has myBatch > 0.  A full-size team
    // with a "myBatch==0 -> skip pFunc" early-out would deadlock whenever
    // pFunc contains an omp barrier, because not all team members would
    // reach the synchronization point.
    int par = omp_get_max_threads();
    if (allBatch < par) par = allBatch;
    // BUGFIX: 'rc' is shared by the whole team, so the unsynchronized
    // 'rc |= pFunc(...)' below was a data race.  Combine the per-thread
    // results with an OR reduction instead.
#pragma omp parallel num_threads(par) reduction(|:rc)
    {
      int64_t nthreads = omp_get_num_threads() ;
      int64_t threadid = omp_get_thread_num() ;
      int64_t nBatch = allBatch / nthreads ;
      int64_t remain = allBatch % nthreads ;
      // the first 'remain' threads take one extra batch element
      int64_t batchBegin = nBatch * threadid + ( threadid < remain ? threadid : remain ) ;
      int64_t myBatch = nBatch + ( threadid < remain ? 1 : 0 ) ;
      assert(myBatch > 0); // guaranteed by clamping 'par' above
      // per-thread tensor params: same geometry, reduced batch count
      vednnTensorParam_t _pParamIn = *pParamIn ; _pParamIn.batch = myBatch ;
      vednnTensorParam_t _pParamOut = *pParamOut ; _pParamOut.batch = myBatch ;
      // offset the data pointers to this thread's first batch element
      float* _pDataIn = ((float *)pDataIn) + batchBegin * pParamIn->channel * pParamIn->height * pParamIn->width ;
      float* _pDataOut = ((float *)pDataOut) + batchBegin * pParamOut->channel * pParamOut->height * pParamOut->width ;
      rc |= pFunc(&_pParamIn, (void*)_pDataIn, pParamKernel, pDataKernel,
          pParamBias, pDataBias, pParamConv, &_pParamOut, (void*) _pDataOut) ;
    }
    // OR-ing two distinct valid codes can produce 3, which is not a valid
    // enum value; collapse it to a generic error.
    return (vednnError_t)(rc<3? rc: 1);
  }
#endif
}
/* ----------------------------------------------------------------------- */
/** Weak Library symbol: override to test improved strategies.
*
* \return rc==VEDNN_SUCCESS and pFunc non-null,
* or rc==VEDNN_ERROR_INVALID_PARM
*/
/* Decision tree mapping the convolution geometry (kernel size, stride,
 * dilation, padding, channel counts, output width, alignment) to a concrete
 * direct-convolution implementation.  OMPWRAP selects an impl that gets the
 * minibatch-threading wrapper (mb_threads stays 1); NOWRAP selects an impl
 * that threads internally (mb_threads=0).  Both macros 'break' out of the
 * enclosing do{...}while(0). */
vednnCnvFwdChoice_t
vednnConvolutionForwardChoice( VEDNN_CONVFWD_API_ARGS )
{
// decision tree will set rc, pFunc, impl and wrapper type
vednnError_t rc = VEDNN_SUCCESS;
vednnConvForward_t pFunc = NULL; // internal function pointer
char const* impl = "unset"; // internal impl name (for messages or ftrace)
int mb_threads = 1; // threads-wrapper type
// TODO: harmonize impl name with libvednnx (maybe via vednn.h API mods)
// A quick initial INVALID_PARM check...
switch( pParamKernel->layout ) {
case VEDNN_FILTER_LAYOUT_NCHW :
break ;
case VEDNN_FILTER_LAYOUT_HWCN :
if( pParamConv->group > 1 ) {
fprintf(stderr, "[VEDNN ERROR] VEDNN does not support grouped convolution with filter_hwcn\n") ;
rc = VEDNN_ERROR_INVALID_PARAM ;
}
break ;
default :
fprintf(stderr, "[VEDNN ERROR] Unknown Filter Layout %d\n", pParamKernel->layout) ;
rc = VEDNN_ERROR_INVALID_PARAM ;
}
// NOTE: OMPWRAP and NOWRAP are CODE-BLOCK macros, not statements
// Set normal ||ism over minibatch, and exit decision tree
#define OMPWRAP( IMPL ) \
{ \
impl = "mb-" #IMPL; \
pFunc = vednnConvolutionForward_direct_##IMPL; \
/*mb_threads = 1; default*/ \
break; \
}
// pFunc handles ||ism internally, and exit decision tree
#define NOWRAP( IMPL ) \
{ \
impl = #IMPL; \
pFunc = vednnConvolutionForward_direct_##IMPL; \
mb_threads = 0; \
break; \
}
if (rc == VEDNN_SUCCESS) do { // allow 'break' to easily exit after any pFunc or rc is set.
if (algo == VEDNN_CONV_ALGORITHM_DIRECT)
{
// Geometry predicates; each compares both the height and width attribute.
#define DIL(N) (pParamConv->dilationHeight == (N) && pParamConv->dilationWidth == (N))
#define PAD(N) (pParamConv->padHeight == (N) && pParamConv->padWidth == (N))
#define STR(N) (pParamConv->strideHeight == (N) && pParamConv->strideWidth == (N))
#define KER(N) (pParamKernel->width == (N) && pParamKernel->height == (N))
#define IWU(N) (pParamIn->width <= (N))
#define OWU(N) (pParamOut->width <= (N))
#define ICoGU(N) (pParamIn->channel / pParamConv->group <= (N))
#define OCoGU(N) (pParamOut->channel / pParamConv->group <= (N))
// Tiny output plane (<=16 px), or small plane (<64 px) with more input
// channels than output pixels and not a pure 1x1/stride-1/dilation-1 conv:
// prefer the channel-vectorized (vecC) kernels.  The bitwise OR below is 1
// only when all six (positive) geometry values are exactly 1.
if ((pParamOut->height * pParamOut->width <= 16) ||
((pParamOut->height * pParamOut->width < 64)
&& (pParamOut->height * pParamOut->width < pParamIn->channel)
// ... !(DIL(1) && STR(1) && KER(1)) ???
&& ( pParamConv->dilationHeight | pParamConv->dilationWidth
| pParamConv->strideHeight | pParamConv->strideWidth
| pParamKernel->height | pParamKernel->width) != 1 )
)
{
// small images may have a fast vecC
if (KER(3) && DIL(1) && STR(1) && PAD(1))
OMPWRAP(vecC_dil1_str1_pad1_ker3)//;
else if (KER(1) && DIL(1) && PAD(0)
&& pParamOut->height == (pParamIn->height - pParamKernel->height) / pParamConv->strideHeight + 1
&& pParamOut->width == (pParamIn->width - pParamKernel->width) / pParamConv->strideWidth + 1)
{
if (ICoGU(1024)) OMPWRAP(vecC_dil1_pad0_ker1_cU1024)//;
else OMPWRAP(vecC_dil1_pad0_ker1)//;
}
OMPWRAP(vecC)//;
}
#ifdef VEDNN_ALT_PARALLEL // resnext branch : AGGRESSIVE use of gemm for all stride > 1 ?
if (!STR(1)) {
// if (STR(2) && DIL(1) && PAD(1) && OWU(128)) {
// if (KER(3)) OMPWRAP(dil1_str2_pad1_ker3_owU128)//;
// if (KER(4)) OMPWRAP(dil1_str2_pad1_ker4_owU128)//;
// }
// try using gemm in most cases with stride > 1
if(OCoGU(256) && OWU(128)) NOWRAP(owU128_T)//;
else NOWRAP(gemm)//;
}
#endif
// dilation 1, stride 1, output plane same size as input ("same" padding)
if (STR(1) && DIL(1)
&& pParamIn->height == pParamOut->height
&& pParamIn->width == pParamOut->width )
{ // d1s1pS ...
if (KER(1)) {
#ifdef VEDNN_ALT_PARALLEL // new: CHECKME
if(OWU(128)) NOWRAP(dil1_str1_pad0_ker1_T)//;
//else OMPWRAP(dil1_str1_pad0_ker1)//;
NOWRAP(gemm) // always faster?;
#else
OMPWRAP(dil1_str1_pad0_ker1)//;
#endif
}
if (KER(3)){ // d1s1pSk3
if (pParamIn->channel == pParamConv->group){ // aka inputChannelGroup==1
if (OWU(128)) OMPWRAP(dil1_str1_padsame_ker3_c1_owU128)//;
OMPWRAP(dil1_str1_padsame_ker3_c1)//;
}
#ifdef VEDNN_ALT_PARALLEL
if (pParamIn->batch < 8) { // checkme!
if (pParamKernel->inChannel % 1024 == 0) // really!?
NOWRAP(dil1_str1_padsame_ker3_c1024x_T)//;
NOWRAP(dil1_str1_padsame_ker3_T)//;
}
#else
OMPWRAP(dil1_str1_padsame_ker3) // is this ever faster?//;
#endif
}
if (KER(5)) { // d1s1pSk5
if (OWU(128)) OMPWRAP(dil1_str1_padsame_ker5_owU128)//;
//
// XXX the following change 01-29-2021 "mem error fix"
// produces wrong output and even sometimes memory corruption.
// Removed (perhaps revert to the memory error version?
//
//else if(pParamIn->height >= 5) OMPWRAP(dil1_str1_padsame_ker5)//;
//
// The following is a much slower substitute, gemm seems faster.
//OMPWRAP(dil1_str1_padsame)//;
//
// uninvestigated (sometimes slightly faster): if (pParamIn->batch >= 4) OMPWRAP(gemm);
NOWRAP(gemm); // this seems to do very well (often 25% faster)
//
}
if (KER(2)) OMPWRAP(dil1_str1_padsame_ker2)//;
OMPWRAP(dil1_str1_padsame)//;
} // end d1s1pS
// dilation 1, zero padding, output extents exactly matching the
// no-padding convolution formula
if (DIL(1) && PAD(0)
&& pParamOut->height == (pParamIn->height - pParamKernel->height) / pParamConv->strideHeight + 1
&& pParamOut->width == (pParamIn->width - pParamKernel->width) / pParamConv->strideWidth + 1 )
{ // d1p0 and oh expected value
if (STR(1))
{ // d1s1p0
if (KER(3) // && IWU(256) // XXX original concords with impl name
// XXX but actually it seems the "correctly able to run" condition is
// (though often the ioaligned may not be fastest, even though code
// looks good. If many channels, often 2x slower).
&& OWU(256)
&& (pParamIn->width & 0x1) == 0 && (((uint64_t)pDataIn) & 0x7) == 0
&& (pParamOut->width & 0x1) == 0 && (((uint64_t)pDataOut) & 0x7) == 0 )
OMPWRAP(dil1_str1_pad0_ker3_iw2XU256_ow2X_ioaligned)//;
if (KER(4) && IWU(256)) OMPWRAP(dil1_str1_pad0_ker4_iwU256)//;
if (OWU(128)) OMPWRAP(dil1_str1_pad0_owU128)//;
OMPWRAP(dil1_str1_pad0)//;
} else if (KER(1)) { // d1s>1p0k1
if (OWU(128)) OMPWRAP(dil1_pad0_owU128_ker1)//;
OMPWRAP(dil1_pad0_ker1)//;
}
{ // d1s>1p0k>1
// todo: this part of tree seems to target d1p0owU128, mostly
if (OWU(128)){
#ifdef VEDNN_ALT_PARALLEL
// XXX 3 possibilities:
// OMPWRAP(dil1_pad0_owU128)//;
// NOWRAP(owU128_T)//;
// NOWRAP(gemm)//;
if(OCoGU(256)) NOWRAP(owU128_T) // NEW mb+g threading//;
NOWRAP(gemm) // NEW: is this case always faster than dil1_pad0_owU128?//;
#else
OMPWRAP(dil1_pad0_owU128)//;
#endif
}else{
#ifdef VEDNN_ALT_PARALLEL
NOWRAP(gemm) // always faster than dil1_pad0 ?//;
#else
OMPWRAP(dil1_pad0)//;
#endif
}
}
} // end d1p0 and oh expected value
if (STR(2) && DIL(1) && PAD(1) && OWU(128)) {
if (KER(3)) OMPWRAP(dil1_str2_pad1_ker3_owU128)//;
if (KER(4)) OMPWRAP(dil1_str2_pad1_ker4_owU128)//;
}
// generic fallbacks
if (OWU(128)) OMPWRAP(owU128)//;
OMPWRAP(default)//;
}
}while(0);
// Decision tree has set impl, pFunc and mb_threads [hopefully]
if (pFunc == NULL) rc = VEDNN_ERROR_INVALID_PARAM;
vednnCnvFwdChoice_t ret = { rc, impl, pFunc, mb_threads };
return ret;
}
/* Scrub the decision-tree helper macros so they cannot leak into code that
 * follows in this translation unit. */
#undef OCoGU
#undef ICoGU
#undef OWU
#undef IWU
#undef KER
#undef STR
#undef PAD
#undef DIL
#undef OMPWRAP
/* BUGFIX: NOWRAP is #define'd unconditionally above, so it must be
 * #undef'd unconditionally too (previously it leaked whenever
 * VEDNN_ALT_PARALLEL was not defined).  #undef of a name that is not
 * defined is a no-op, so this is always safe. */
#undef NOWRAP
/* ----------------------------------------------------------------------- */
/** \b with the bias arguments.
*
* This implementation is non-gemm, no-intrinsics ncc code.
* Surprisingly, it is the fastest impl in some cases!
*/
vednnError_t vednnConvolutionForwardAddBias( VEDNN_CONVFWD_API_ARGS )
{
// run the decision tree
vednnCnvFwdChoice_t const c = vednnConvolutionForwardChoice(VEDNN_CONVFWD_API_ARGS_LIST);
vednnError_t rc = c.rc; // initial value only
if (rc == VEDNN_SUCCESS) { //
// debug...
//fprintf(stderr, " cnvFwd-def=%s\n", c.impl); fflush(stderr);
assert( c.pFunc != NULL );
// ftrace according to compile flags
// Consider changing impl to reflect bias XXX
FTRACE_BEGIN(c.impl); // impl likely differs from vednnConvolutionLists.c name
// run with or without threading over minibatch
if (c.mb_threads) { // call with default conv fwd ||ism wrapper
rc = vednnConvolutionForward_mb_threads(c.pFunc, VEDNN_CONVFWD_ARGS_LIST);
}else{ // call without any threading wrapper
rc = c.pFunc(VEDNN_CONVFWD_ARGS_LIST);
}
FTRACE_END(c.impl); // note different from src/wrap vednnx extensions :(
}
return rc;
}
/* ----------------------------------------------------------------------- */
/** \b without the bias arguments (auto-supplying NULL for bias args). */
/** Convolution forward \b without bias: forwards to the AddBias entry point,
 *  supplying NULL for both the bias parameter struct and the bias data. */
vednnError_t vednnConvolutionForward(
    const vednnTensorParam_t *pParamIn, const void *pDataIn,
    const vednnFilterParam_t *pParamKernel, const void *pDataKernel,
    const vednnTensorParam_t *pParamOut, void *pDataOut,
    const vednnConvolutionParam_t *pParamConv,
    vednnConvolutionAlgorithm_t algo )
{
  // No bias tensor: NULL bias param and NULL bias data.
  return vednnConvolutionForwardAddBias(
      pParamIn, pDataIn,
      pParamKernel, pDataKernel,
      NULL, NULL,
      pParamOut, pDataOut,
      pParamConv, algo );
}
#ifdef __cplusplus
}//extern "C"
#endif
// vim: et ts=2 sw=2 cindent cino=+4s,^l0,\:s syntax=cpp.doxygen
|
GradientImageFilter.h | /*
* MIT License
*
* Copyright (c) 2018-2019 Benjamin Köhler
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#ifndef BK_GRADIENTIMAGEFILTER_H
#define BK_GRADIENTIMAGEFILTER_H
#include <type_traits>
#include <bkMath/functions/list_grid_id_conversion.h>
#ifdef BK_EMIT_PROGRESS
#include <bk/Progress>
#include <bk/Localization>
#endif
namespace bk
{
/// Stateless filter that maps an image to its per-voxel gradient/Jacobian.
/// All work happens in the static apply(); instances carry no data.
class GradientImageFilter
{
//====================================================================================================
//===== DEFINITIONS
//====================================================================================================
using self_type = GradientImageFilter;
//====================================================================================================
//===== CONSTRUCTORS & DESTRUCTOR
//====================================================================================================
public:
/// @{ -------------------------------------------------- CTOR
constexpr GradientImageFilter() = default;
constexpr GradientImageFilter(const self_type&) = default;
constexpr GradientImageFilter(self_type&&) noexcept = default;
/// @}
/// @{ -------------------------------------------------- DTOR
~GradientImageFilter() = default;
/// @}
//====================================================================================================
//===== SETTER
//====================================================================================================
/// @{ -------------------------------------------------- OPERATOR =
[[maybe_unused]] constexpr auto operator=(const self_type& other) -> self_type& = default;
[[maybe_unused]] constexpr auto operator=(self_type&& other) noexcept -> self_type& = default;
/// @}
//====================================================================================================
//===== FUNCTIONS
//====================================================================================================
/// @{ -------------------------------------------------- APPLY
/// Evaluates img.jacobian() at every grid position of \p img and writes the
/// results into a new image of the same size; the result image's value type
/// is whatever img.jacobian() returns (deduced via decltype).
/// \param img input image; must provide size(), num_values() and jacobian().
/// \return new image of identical size holding the per-voxel jacobians.
template<typename TImage>
[[nodiscard]] static auto apply(const TImage& img) -> typename TImage::template self_template_type<std::decay_t<decltype(img.jacobian(bk::list_to_grid_id(img.size(), 0)))>>
{
using gradient_type = std::decay_t<decltype(img.jacobian(bk::list_to_grid_id(img.size(), 0)))>;
#ifdef BK_EMIT_PROGRESS
// 10 ticks for setup + one tick per voxel
bk::Progress& prog = bk_progress.emplace_task(10 + img.num_values(), ___("Gradient image filter"));
#endif
typename TImage::template self_template_type<gradient_type> res;
res.set_size(img.size());
#ifdef BK_EMIT_PROGRESS
prog.increment(10);
#endif
// Each voxel is independent; parallelize over the flat index.
#pragma omp parallel for
for (unsigned int i = 0; i < img.num_values(); ++i)
{
res[i] = img.jacobian(bk::list_to_grid_id(img.size(), i));
#ifdef BK_EMIT_PROGRESS
// NOTE(review): critical section per voxel serializes progress updates
// (hot-path cost); the section name "filter_gradient_strength" looks
// copied from another filter -- confirm intended.
#pragma omp critical(filter_gradient_strength)
{ prog.increment(1); }
#endif
}
#ifdef BK_EMIT_PROGRESS
prog.set_finished();
#endif
return res;
}
/// @}
}; // class GradientImageFilter
} // namespace bk
#endif //BK_GRADIENTIMAGEFILTER_H
|
j3d27pt.c | #define BENCH_DIM 3
#define BENCH_FPP 54
#define BENCH_RAD 1
#include "common.h"
/* 27-point (3x3x3, radius-1) weighted Jacobi stencil over a cubic grid.
 * A1 holds two time-alternating buffers (double buffering via t%2 and
 * (t+1)%2), each dimsize^3 cells where dimsize = compsize + 2*BENCH_RAD.
 * scop==true runs the #pragma scop region (for polyhedral source-to-source
 * tools); scop==false runs the same arithmetic with an OpenMP-parallel
 * i-loop per timestep.  The two branches compute identical results.
 * Returns wall-clock seconds spent.  NOTE(review): end_time is never
 * assigned, so the ternary in the return always falls back to sb_time(). */
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
// reinterpret the flat buffer as [2][dimsize][dimsize][dimsize] (VLA cast)
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
#pragma scop
for (int t = 0; t < timestep; t++)
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
(1.500f*A[t%2][i-1][j][k] +
0.500f*A[t%2][i-1][j-1][k-1] +
0.700f*A[t%2][i-1][j-1][k] +
0.900f*A[t%2][i-1][j-1][k+1] +
1.200f*A[t%2][i-1][j][k-1] +
1.201f*A[t%2][i-1][j][k+1] +
0.901f*A[t%2][i-1][j+1][k-1] +
0.701f*A[t%2][i-1][j+1][k] +
0.501f*A[t%2][i-1][j+1][k+1] +
1.510f*A[t%2][i][j][k] +
0.510f*A[t%2][i][j-1][k-1] +
0.710f*A[t%2][i][j-1][k] +
0.910f*A[t%2][i][j-1][k+1] +
1.210f*A[t%2][i][j][k-1] +
1.211f*A[t%2][i][j][k+1] +
0.911f*A[t%2][i][j+1][k-1] +
0.711f*A[t%2][i][j+1][k] +
0.511f*A[t%2][i][j+1][k+1] +
1.520f*A[t%2][i+1][j][k] +
0.520f*A[t%2][i+1][j-1][k-1] +
0.720f*A[t%2][i+1][j-1][k] +
0.920f*A[t%2][i+1][j-1][k+1] +
1.220f*A[t%2][i+1][j][k-1] +
1.221f*A[t%2][i+1][j][k+1] +
0.921f*A[t%2][i+1][j+1][k-1] +
0.721f*A[t%2][i+1][j+1][k] +
0.521f*A[t%2][i+1][j+1][k+1]) / 159;
#pragma endscop
}
else {
// same stencil, OpenMP over the outermost spatial loop; timesteps stay
// sequential because t+1 depends on t
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
(1.500f*A[t%2][i-1][j][k] +
0.500f*A[t%2][i-1][j-1][k-1] +
0.700f*A[t%2][i-1][j-1][k] +
0.900f*A[t%2][i-1][j-1][k+1] +
1.200f*A[t%2][i-1][j][k-1] +
1.201f*A[t%2][i-1][j][k+1] +
0.901f*A[t%2][i-1][j+1][k-1] +
0.701f*A[t%2][i-1][j+1][k] +
0.501f*A[t%2][i-1][j+1][k+1] +
1.510f*A[t%2][i][j][k] +
0.510f*A[t%2][i][j-1][k-1] +
0.710f*A[t%2][i][j-1][k] +
0.910f*A[t%2][i][j-1][k+1] +
1.210f*A[t%2][i][j][k-1] +
1.211f*A[t%2][i][j][k+1] +
0.911f*A[t%2][i][j+1][k-1] +
0.711f*A[t%2][i][j+1][k] +
0.511f*A[t%2][i][j+1][k+1] +
1.520f*A[t%2][i+1][j][k] +
0.520f*A[t%2][i+1][j-1][k-1] +
0.720f*A[t%2][i+1][j-1][k] +
0.920f*A[t%2][i+1][j-1][k+1] +
1.220f*A[t%2][i+1][j][k-1] +
1.221f*A[t%2][i+1][j][k+1] +
0.921f*A[t%2][i+1][j+1][k-1] +
0.721f*A[t%2][i+1][j+1][k] +
0.521f*A[t%2][i+1][j+1][k+1]) / 159;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
atomic_write_codegen.c | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;
typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;
struct BitFields {
int : 32;
int a : 31;
} bfx;
struct BitFields_packed {
int : 32;
int a : 31;
} __attribute__ ((__packed__)) bfx_packed;
struct BitFields2 {
int : 31;
int a : 1;
} bfx2;
struct BitFields2_packed {
int : 31;
int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;
struct BitFields3 {
int : 11;
int a : 14;
} bfx3;
struct BitFields3_packed {
int : 11;
int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;
struct BitFields4 {
short : 16;
int a: 1;
long b : 7;
} bfx4;
struct BitFields4_packed {
short : 16;
int a: 1;
long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;
typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;
register int rix __asm__("0");
int main() {
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
bx = bv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
cx = cv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
ucx = ucv;
// CHECK: load i16, i16*
// CHECK: store atomic i16
#pragma omp atomic write
sx = sv;
// CHECK: load i16, i16*
// CHECK: store atomic i16
#pragma omp atomic write
usx = usv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
#pragma omp atomic write
ix = iv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
#pragma omp atomic write
uix = uiv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
lx = lv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
ulx = ulv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
llx = llv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
ullx = ullv;
// CHECK: load float, float*
// CHECK: bitcast float {{.*}} to i32
// CHECK: store atomic i32 {{.*}}, i32* bitcast (float*
#pragma omp atomic write
fx = fv;
// CHECK: load double, double*
// CHECK: bitcast double {{.*}} to i64
// CHECK: store atomic i64 {{.*}}, i64* bitcast (double*
#pragma omp atomic write
dx = dv;
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80*
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
// CHECK: store x86_fp80 [[LD]], x86_fp80* [[LDTEMP]]
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: [[LD:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: store atomic i128 [[LD]], i128* bitcast (x86_fp80*
#pragma omp atomic write
ldx = ldv;
// CHECK: [[REAL_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[REAL_VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 [[IMG_VAL]], i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.*}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cix = civ;
// CHECK: [[REAL_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
// CHECK: store float [[REAL_VAL]], float* [[TEMP_REAL_REF]]
// CHECK: store float [[IMG_VAL]], float* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ float, float }* @{{.*}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cfx = cfv;
// CHECK: [[REAL_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
// CHECK: store double [[REAL_VAL]], double* [[TEMP_REAL_REF]]
// CHECK: store double [[IMG_VAL]], double* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 16, i8* bitcast ({ double, double }* @{{.*}} to i8*), i8* [[BITCAST]], i32 5)
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst write
cdx = cdv;
// CHECK: load i8, i8*
// CHECK: store atomic i64
#pragma omp atomic write
ulx = bv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
bx = cv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write, seq_cst
cx = ucv;
// CHECK: load i16, i16*
// CHECK: store atomic i64
#pragma omp atomic write
ulx = sv;
// CHECK: load i16, i16*
// CHECK: store atomic i64
#pragma omp atomic write
lx = usv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst, write
uix = iv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
#pragma omp atomic write
ix = uiv;
// CHECK: load i64, i64*
// CHECK: [[VAL:%.+]] = trunc i64 %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 0, i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cix = lv;
// CHECK: load i64, i64*
// CHECK: store atomic i32 %{{.+}}, i32* bitcast (float*
#pragma omp atomic write
fx = ulv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 %{{.+}}, i64* bitcast (double*
#pragma omp atomic write
dx = llv;
// CHECK: load i64, i64*
// CHECK: [[VAL:%.+]] = uitofp i64 %{{.+}} to x86_fp80
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
// CHECK: store x86_fp80 [[VAL]], x86_fp80* [[TEMP]]
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
// CHECK: [[VAL:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: store atomic i128 [[VAL]], i128* bitcast (x86_fp80*
#pragma omp atomic write
ldx = ullv;
// CHECK: load float, float*
// CHECK: [[VAL:%.+]] = fptosi float %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 0, i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cix = fv;
// CHECK: load double, double*
// CHECK: store atomic i16
#pragma omp atomic write
sx = dv;
// CHECK: load x86_fp80, x86_fp80*
// CHECK: store atomic i8
#pragma omp atomic write
bx = ldv;
// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0)
// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1)
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: or i1
// CHECK: store atomic i8
#pragma omp atomic write
bx = civ;
// CHECK: load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0)
// CHECK: store atomic i16
#pragma omp atomic write
usx = cfv;
// CHECK: load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0)
// CHECK: store atomic i64
#pragma omp atomic write
llx = cdv;
// CHECK-DAG: [[IDX:%.+]] = load i16, i16* @{{.+}}
// CHECK-DAG: load i8, i8*
// CHECK-DAG: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I128:%.+]] = phi i128 [ [[I128VAL]], %{{.+}} ], [ [[FAILED_I128_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BITCAST:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[OLD_I128]], i128* [[BITCAST]],
// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[VEC_ITEM_VAL]], i16 [[IDX]]
// CHECK: store <4 x i32> [[NEW_VEC_VAL]], <4 x i32>* [[LDTEMP]]
// CHECK: [[NEW_I128:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (<4 x i32>* [[DEST]] to i128*), i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic
// CHECK: [[FAILED_I128_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
int4x[sv] = bv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[BITCAST:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST]], i32 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]],
// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[LDTEMP1:%.+]],
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP1]],
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 [[OLD_BF_VALUE]], -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP1]]
// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP]] to i8*
// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP1]] to i8*
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, 2147483647
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx2.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx2_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx3.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[LDTEMP:%.+]] = bitcast i32* %{{.+}} to i24*
// CHECK: [[BITCAST:%.+]] = bitcast i24* %{{.+}} to i8*
// CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST]], i32 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_VAL:%.+]] = load i24, i24* %{{.+}},
// CHECK: store i24 [[OLD_VAL]], i24* [[TEMP:%.+]],
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24
// CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3
// CHECK: [[BF_CLEAR:%.+]] = and i24 %{{.+}}, -131065
// CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i24 %{{.+}}, i24* [[TEMP]]
// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i24* [[LDTEMP]] to i8*
// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i24* [[TEMP]] to i8*
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx3_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64
// CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -65537
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_VALUE:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, -2
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i64 [[NEW_VAL]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 17
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -16646145
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4.b = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 1
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4_packed.b = ldv;
// CHECK: load i64, i64*
// CHECK: [[VEC_ITEM_VAL:%.+]] = uitofp i64 %{{.+}} to float
// CHECK: [[I64VAL:%.+]] = load atomic i64, i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I64:%.+]] = phi i64 [ [[I64VAL]], %{{.+}} ], [ [[FAILED_I64_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[OLD_I64]], i64* [[BITCAST]],
// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0
// CHECK: store <2 x float> [[NEW_VEC_VAL]], <2 x float>* [[LDTEMP]]
// CHECK: [[NEW_I64:%.+]] = load i64, i64* [[BITCAST]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (<2 x float>* [[DEST]] to i64*), i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic
// CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
float2x.x = ulv;
// CHECK: call i32 @llvm.read_register.i32(
// CHECK: sitofp i32 %{{.+}} to double
// CHECK: bitcast double %{{.+}} to i64
// CHECK: store atomic i64 %{{.+}}, i64* bitcast (double* @{{.+}} to i64*) seq_cst
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write seq_cst
dv = rix;
return 0;
}
#endif
|
backprop.c | /*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#ifdef OPEN
#include <omp.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
#include <math.h>
//#define OPEN
#define ABS(x) (((x) > 0.0) ? (x) : (-(x)))
#define fastcopy(to,from,len)\
{\
register char *_to,*_from;\
register int _i,_l;\
_to = (char *)(to);\
_from = (char *)(from);\
_l = (len);\
for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\
}
/*** Return random number between 0.0 and 1.0 ***/
/* Uniform pseudo-random float in [0.0, 1.0]: rand() scaled by BIGRND
 * (a constant from backprop.h). Sequence is controlled by the srand()
 * call in bpnn_initialize. */
float drnd()
{
  return ((float) rand() / (float) BIGRND);
}
/*** Return random number between -1.0 and 1.0 ***/
/* Uniform pseudo-random float in [-1.0, 1.0], derived from drnd(). */
float dpn1()
{
  return ((drnd() * 2.0) - 1.0);
}
/*** The squashing function. Currently, it's a sigmoid. ***/
float squash(x)
float x;
{
float m;
//x = -x;
//m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
//return(1.0 / (1.0 + m));
return (1.0 / (1.0 + exp(-x)));
}
/*** Allocate 1d array of floats ***/
/* Allocate an uninitialized vector of n floats.
 * Returns NULL (after printing a diagnostic) when malloc fails;
 * the caller owns the returned buffer. */
float *alloc_1d_dbl(n)
int n;
{
  float *arr;

  arr = (float *) malloc ((unsigned) (n * sizeof (float)));
  if (arr == NULL) {
    printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n");
    return (NULL);
  }
  return (arr);
}
/*** Allocate 2d array of floats ***/
/* Allocate an m x n matrix of floats as an array of m row pointers,
 * each row allocated separately via alloc_1d_dbl.
 * Returns NULL (after a diagnostic) if the row-pointer array cannot be
 * allocated; individual row allocation failures leave NULL rows. */
float **alloc_2d_dbl(m, n)
int m, n;
{
  float **tbl;
  int row;

  tbl = (float **) malloc ((unsigned) (m * sizeof (float *)));
  if (tbl == NULL) {
    printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
    return (NULL);
  }
  for (row = 0; row < m; row++)
    tbl[row] = alloc_1d_dbl(n);
  return (tbl);
}
/* Fill an (m+1) x (n+1) weight matrix with uniform random values in
 * [0, 1]. Bounds are inclusive because index 0 of each layer is the
 * bias/threshold unit. */
bpnn_randomize_weights(w, m, n)
float **w;
int m, n;
{
  int row, col;

  for (row = 0; row <= m; row++)
    for (col = 0; col <= n; col++)
      w[row][col] = (float) rand()/RAND_MAX;
}
/* Despite the name, this fills every slot 0..m (inclusive) with the
 * constant 0.1 — the original random initialization is disabled. */
bpnn_randomize_row(w, m)
float *w;
int m;
{
  int idx;

  for (idx = 0; idx <= m; idx++)
    w[idx] = 0.1;
}
/* Zero an (m+1) x (n+1) weight matrix (inclusive bounds cover the bias
 * row/column at index 0). */
bpnn_zero_weights(w, m, n)
float **w;
int m, n;
{
  int row, col;

  for (row = 0; row <= m; row++)
    for (col = 0; col <= n; col++)
      w[row][col] = 0.0;
}
/* Seed the C library RNG (and announce the seed) so that training runs
 * are reproducible. The parameter declaration is spelled out here; the
 * original omitted it, which defaults to int in K&R C — same type. */
void bpnn_initialize(seed)
int seed;
{
  printf("Random number generator seed: %d\n", seed);
  srand(seed);
}
/* Allocate a BPNN with n_in inputs, n_hidden hidden units and n_out
 * outputs. Every layer vector and weight matrix gets one extra slot
 * (index 0) for the bias/threshold unit — hence all the "+ 1" sizes.
 * NOTE(review): the alloc_1d_dbl/alloc_2d_dbl results are not checked,
 * so on allocation failure this returns a partially-NULL network. */
BPNN *bpnn_internal_create(n_in, n_hidden, n_out)
int n_in, n_hidden, n_out;
{
  BPNN *newnet;

  newnet = (BPNN *) malloc (sizeof (BPNN));
  if (newnet == NULL) {
    printf("BPNN_CREATE: Couldn't allocate neural network\n");
    return (NULL);
  }

  newnet->input_n = n_in;
  newnet->hidden_n = n_hidden;
  newnet->output_n = n_out;
  /* Activation, delta, and target vectors (bias slot included). */
  newnet->input_units = alloc_1d_dbl(n_in + 1);
  newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
  newnet->output_units = alloc_1d_dbl(n_out + 1);
  newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
  newnet->output_delta = alloc_1d_dbl(n_out + 1);
  newnet->target = alloc_1d_dbl(n_out + 1);
  /* Weight matrices and their momentum ("prev") companions. */
  newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
  newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
  return (newnet);
}
/* Release every buffer allocated by bpnn_internal_create, then the BPNN
 * struct itself. Row counts mirror the allocation sizes: input weight
 * matrices have input_n + 1 rows, hidden ones hidden_n + 1 (the extra
 * row is the bias unit). */
void bpnn_free(net)
BPNN *net;
{
  int n1, n2, i;

  n1 = net->input_n;
  n2 = net->hidden_n;

  /* Activation/delta/target vectors. */
  free((char *) net->input_units);
  free((char *) net->hidden_units);
  free((char *) net->output_units);
  free((char *) net->hidden_delta);
  free((char *) net->output_delta);
  free((char *) net->target);

  /* Weight matrices: free each row, then the row-pointer arrays. */
  for (i = 0; i <= n1; i++) {
    free((char *) net->input_weights[i]);
    free((char *) net->input_prev_weights[i]);
  }
  free((char *) net->input_weights);
  free((char *) net->input_prev_weights);
  for (i = 0; i <= n2; i++) {
    free((char *) net->hidden_weights[i]);
    free((char *) net->hidden_prev_weights[i]);
  }
  free((char *) net->hidden_weights);
  free((char *) net->hidden_prev_weights);

  free((char *) net);
}
/*** Creates a new fully-connected network from scratch,
with the given numbers of input, hidden, and output units.
Threshold units are automatically included. All weights are
randomly initialized.
Space is also allocated for temporary storage (momentum weights,
error computations, etc).
***/
BPNN *bpnn_create(n_in, n_hidden, n_out)
int n_in, n_hidden, n_out;
{
  BPNN *newnet;

  /* Allocate, then initialize: input->hidden weights random (or zero
   * when compiled with INITZERO), hidden->output weights random, the
   * momentum ("prev") buffers zeroed, and the target row set to the
   * constant 0.1 by bpnn_randomize_row.
   * NOTE(review): a NULL return from bpnn_internal_create is not
   * checked before use. */
  newnet = bpnn_internal_create(n_in, n_hidden, n_out);
#ifdef INITZERO
  bpnn_zero_weights(newnet->input_weights, n_in, n_hidden);
#else
  bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden);
#endif
  bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out);
  bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden);
  bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out);
  bpnn_randomize_row(newnet->target, n_out);
  return (newnet);
}
/* Forward-propagate activations from layer l1 (n1 units) to layer l2
 * (n2 units) through weight matrix conn[(n1+1) x (n2+1)]. Slot 0 of each
 * layer is the bias unit, so l1[0] is forced to 1.0 and output unit
 * indices run from 1.
 * NOTE(review): under OPEN, `reduction(+: sum)` looks incorrect — sum is
 * reset per j and used as a per-iteration accumulator, so it should be
 * private(sum), not a reduction; verify before enabling OPEN. */
void bpnn_layerforward(l1, l2, conn, n1, n2)
float *l1, *l2, **conn;
int n1, n2;
{
  float sum;
  int j, k;

  /*** Set up thresholding unit ***/
  l1[0] = 1.0;
#ifdef OPEN
  omp_set_num_threads(NUM_THREAD);
  #pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static)
#endif
  /*** For each unit in second layer ***/
  for (j = 1; j <= n2; j++) {
    /*** Compute weighted sum of its inputs ***/
    sum = 0.0;
    for (k = 0; k <= n1; k++) {
      sum += conn[k][j] * l1[k];
    }
    l2[j] = squash(sum);
  }
}
//extern "C"
/* Compute output-layer error terms: delta[j] is the sigmoid-derivative-
 * weighted difference between target and output for units 1..nj (slot 0
 * is the bias and is skipped); *err receives the summed absolute error. */
void bpnn_output_error(delta, target, output, nj, err)
float *delta, *target, *output, *err;
int nj;
{
  int unit;
  float out_v, tgt_v, total;

  total = 0.0;
  for (unit = 1; unit <= nj; unit++) {
    out_v = output[unit];
    tgt_v = target[unit];
    delta[unit] = out_v * (1.0 - out_v) * (tgt_v - out_v);
    /* Absolute value written out inline (same as the file's ABS macro). */
    total += (delta[unit] > 0.0) ? delta[unit] : -delta[unit];
  }
  *err = total;
}
/* Back-propagate output deltas through the hidden->output weights `who`
 * to produce hidden-layer error terms delta_h[1..nh]; *err receives the
 * summed absolute hidden error (slot 0 is the bias, skipped). */
void bpnn_hidden_error(delta_h,
                       nh,
                       delta_o,
                       no,
                       who,
                       hidden,
                       err)
float *delta_h, *delta_o, *hidden, **who, *err;
int nh, no;
{
  int h_unit, o_unit;
  float act, backsum, total;

  total = 0.0;
  for (h_unit = 1; h_unit <= nh; h_unit++) {
    act = hidden[h_unit];
    backsum = 0.0;
    for (o_unit = 1; o_unit <= no; o_unit++)
      backsum += delta_o[o_unit] * who[h_unit][o_unit];
    delta_h[h_unit] = act * (1.0 - act) * backsum;
    /* Absolute value written out inline (same as the file's ABS macro). */
    total += (delta_h[h_unit] > 0.0) ? delta_h[h_unit] : -delta_h[h_unit];
  }
  *err = total;
}
/* Gradient-descent weight update with momentum: for each connection
 * (k -> j), w[k][j] += ETA * delta[j] * ly[k] + MOMENTUM * oldw[k][j],
 * and the applied step is stored back into oldw for the next call.
 * ndelta and nly have no declarations, so they default to int (K&R C).
 * NOTE(review): the OPEN pragma lists `momentum` in firstprivate, but no
 * such variable exists (MOMENTUM is a macro from backprop.h) — this
 * would not compile with OPEN defined; verify before enabling. */
void bpnn_adjust_weights(delta, ndelta, ly, nly, w, oldw)
float *delta, *ly, **w, **oldw;
{
  float new_dw;
  int k, j;

  /* Bias unit always outputs 1. */
  ly[0] = 1.0;
  //eta = 0.3;
  //momentum = 0.3;
#ifdef OPEN
  omp_set_num_threads(NUM_THREAD);
  #pragma omp parallel for \
      shared(oldw, w, delta) \
      private(j, k, new_dw) \
      firstprivate(ndelta, nly, momentum)
#endif
  for (j = 1; j <= ndelta; j++) {
    for (k = 0; k <= nly; k++) {
      new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j]));
      w[k][j] += new_dw;
      oldw[k][j] = new_dw;
    }
  }
}
/* Run one forward pass through the whole network:
 * input -> hidden -> output activations. */
void bpnn_feedforward(net)
BPNN *net;
{
  int in, hid, out;

  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;

  /*** Feed forward input activations. ***/
  bpnn_layerforward(net->input_units, net->hidden_units,
      net->input_weights, in, hid);
  bpnn_layerforward(net->hidden_units, net->output_units,
      net->hidden_weights, hid, out);
}
/* One training step on the network's current input/target vectors:
 * forward pass, error computation for the output and hidden layers
 * (absolute error sums returned via *eo / *eh), then momentum-based
 * weight updates for both weight matrices. */
void bpnn_train(net, eo, eh)
BPNN *net;
float *eo, *eh;
{
  int in, hid, out;
  float out_err, hid_err;

  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;

  /*** Feed forward input activations. ***/
  bpnn_layerforward(net->input_units, net->hidden_units,
      net->input_weights, in, hid);
  bpnn_layerforward(net->hidden_units, net->output_units,
      net->hidden_weights, hid, out);

  /*** Compute error on output and hidden units. ***/
  bpnn_output_error(net->output_delta, net->target, net->output_units,
      out, &out_err);
  bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
      net->hidden_weights, net->hidden_units, &hid_err);
  *eo = out_err;
  *eh = hid_err;

  /*** Adjust input and hidden weights. ***/
  bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid,
      net->hidden_weights, net->hidden_prev_weights);
  bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in,
      net->input_weights, net->input_prev_weights);
}
/* Serialize a network to `filename`: three int layer sizes, then the
 * input->hidden weight matrix, then the hidden->output matrix, as raw
 * floats in row-major order — the exact layout bpnn_read expects.
 * Fixes vs. the original:
 *  - the layer sizes were written with size/count sizeof(char) (one
 *    byte each) while bpnn_read reads sizeof(int) per dimension;
 *  - the weight fwrite calls passed the BYTE count as the element count
 *    with element size sizeof(float), reading 4x past the buffer;
 *  - fopen failure is now checked. */
void bpnn_save(net, filename)
BPNN *net;
char *filename;
{
  int n1, n2, n3, i, j, memcnt;
  float dvalue, **w;
  char *mem;
  FILE *pFile;

  pFile = fopen( filename, "w+" );
  if (pFile == NULL) {
    printf("BPNN_SAVE: Cannot create '%s'\n", filename);
    return;
  }

  n1 = net->input_n;  n2 = net->hidden_n;  n3 = net->output_n;
  printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename);

  /* Header: three whole ints, matching the reads in bpnn_read. */
  fwrite( (char *) &n1, sizeof(int), 1, pFile);
  fwrite( (char *) &n2, sizeof(int), 1, pFile);
  fwrite( (char *) &n3, sizeof(int), 1, pFile);

  /* Input->hidden weights: pack into a flat buffer, then write
   * (n1+1)*(n2+1) float ELEMENTS (fwrite's count is in elements). */
  memcnt = 0;
  w = net->input_weights;
  mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
  for (i = 0; i <= n1; i++) {
    for (j = 0; j <= n2; j++) {
      dvalue = w[i][j];
      fastcopy(&mem[memcnt], &dvalue, sizeof(float));
      memcnt += sizeof(float);
    }
  }
  fwrite( mem, sizeof(float), (unsigned) ((n1+1) * (n2+1)), pFile);
  free(mem);

  /* Hidden->output weights, same layout. */
  memcnt = 0;
  w = net->hidden_weights;
  mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
  for (i = 0; i <= n2; i++) {
    for (j = 0; j <= n3; j++) {
      dvalue = w[i][j];
      fastcopy(&mem[memcnt], &dvalue, sizeof(float));
      memcnt += sizeof(float);
    }
  }
  fwrite( mem, sizeof(float), (unsigned) ((n2+1) * (n3+1)), pFile);
  free(mem);

  fclose(pFile);
  return;
}
/* Deserialize a network written by bpnn_save: three int layer sizes,
 * then the input->hidden and hidden->output weight matrices as raw
 * floats. Returns NULL if the file cannot be opened or the header is
 * truncated; caller owns the returned BPNN (free with bpnn_free).
 * Fix vs. the original: it called POSIX open()/read()/close() without
 * including <fcntl.h>/<unistd.h> (implicit declarations) and ignored
 * every read() result — stdio is used instead, consistent with
 * bpnn_save, and the header reads are checked. */
BPNN *bpnn_read(filename)
char *filename;
{
  char *mem;
  BPNN *new;
  int n1, n2, n3, i, j, memcnt;
  FILE *pFile;

  if ((pFile = fopen(filename, "r")) == NULL) {
    return (NULL);
  }

  printf("Reading '%s'\n", filename);

  if (fread((char *) &n1, sizeof(int), 1, pFile) != 1 ||
      fread((char *) &n2, sizeof(int), 1, pFile) != 1 ||
      fread((char *) &n3, sizeof(int), 1, pFile) != 1) {
    fclose(pFile);
    return (NULL);
  }
  new = bpnn_internal_create(n1, n2, n3);

  printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3);
  printf("Reading input weights...");

  /* NOTE(review): short reads of the weight blocks are still ignored,
   * matching the original's behavior for truncated files. */
  memcnt = 0;
  mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
  fread(mem, 1, (n1+1) * (n2+1) * sizeof(float), pFile);
  for (i = 0; i <= n1; i++) {
    for (j = 0; j <= n2; j++) {
      fastcopy(&(new->input_weights[i][j]), &mem[memcnt], sizeof(float));
      memcnt += sizeof(float);
    }
  }
  free(mem);

  printf("Done\nReading hidden weights...");

  memcnt = 0;
  mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
  fread(mem, 1, (n2+1) * (n3+1) * sizeof(float), pFile);
  for (i = 0; i <= n2; i++) {
    for (j = 0; j <= n3; j++) {
      fastcopy(&(new->hidden_weights[i][j]), &mem[memcnt], sizeof(float));
      memcnt += sizeof(float);
    }
  }
  free(mem);
  fclose(pFile);

  printf("Done\n");

  /* Momentum buffers start at zero for a freshly loaded network. */
  bpnn_zero_weights(new->input_prev_weights, n1, n2);
  bpnn_zero_weights(new->hidden_prev_weights, n2, n3);
  return (new);
}
|
anakin_thread_parallel_nd.h | /*******************************************************************************
* Copyright (c) 2018 Anakin Authors All Rights Reserve.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifndef SABER_FUNCS_IMPL_X86_ANAKIN_THREAD_PARALLEL_ND_H
#define SABER_FUNCS_IMPL_X86_ANAKIN_THREAD_PARALLEL_ND_H
#include <utility>
/* This header must be included by anakin_thread.hpp only */
/* Functions:
* - parallel(nthr, f) - executes f in parallel using at most
* nthr threads. If nthr equals 0
* anakin_get_max_threads() threads is
* used
* - for_nd(ithr, nthr, dims..., f) - multidimensional for loop for already
* created threads
* - parallel_nd(dims..., f) - creates a parallel section and then
* calls for_nd
* - parallel_nd_in_omp(dims..., f) - queries current nthr and ithr and then
* calls for_nd (mostly for convenience)
*/
namespace anakin {
namespace saber {
/* general parallelization */
/* Execute f(ithr, nthr) on at most nthr threads using the backend
 * selected by ANAKIN_THR (sequential, OpenMP, or TBB). nthr == 0 means
 * "use anakin_get_max_threads()". The nthr == 1 case runs f inline
 * without spawning a parallel region. */
template <typename F>
void parallel(int nthr, F f) {
    if (nthr == 0) nthr = anakin_get_max_threads();
#if ANAKIN_THR == ANAKIN_THR_SEQ
    assert(nthr == 1);
    f(0, 1);
#elif ANAKIN_THR == ANAKIN_THR_OMP
    if (nthr == 1) { f(0, 1); return; }
#   pragma omp parallel num_threads(nthr)
    f(anakin_get_thread_num(), anakin_get_num_threads());
#elif ANAKIN_THR == ANAKIN_THR_TBB
    if (nthr == 1) { f(0, 1); return; }
    tbb::parallel_for(0, nthr, [&](int ithr) { f(ithr, nthr); });
#endif
}
/* Split n work items across `team` workers: worker `tid` is assigned the
 * half-open range [n_start, n_end). The first T1 workers receive
 * n1 = ceil(n / team) items each and the remainder n2 = n1 - 1, so the
 * load differs by at most one item between workers. */
template <typename T, typename U>
inline void balance211(T n, U team, U tid, T &n_start, T &n_end) {
    T n_min = 1;
    /* n_my aliases n_end: it holds this worker's item COUNT until the
     * final `n_end += n_start` converts it to an exclusive end index. */
    T &n_my = n_end;
    if (team <= 1 || n == 0) {
        n_start = 0;
        n_my = n;
    } else if (n_min == 1) {
        // team = T1 + T2
        // n = T1*n1 + T2*n2  (n1 - n2 = 1)
        T n1 = (n + (T)team - 1) / (T)team;
        T n2 = n1 - 1;
        T T1 = n - n2 * (T)team;
        n_my = (T)tid < T1 ? n1 : n2;
        n_start = (T)tid <= T1 ? tid * n1 : T1 * n1 + ((T)tid - T1) * n2;
    }
    n_end += n_start;
}
/* Decompose a flat row-major index into per-dimension coordinates:
 * nd_iterator_init(start, d0, D0, d1, D1, ...) assigns each dK so the
 * tuple (d0, d1, ...) addresses element `start` of a D0 x D1 x ... space.
 * The recursion peels the innermost (last) dimension first. */
template<typename T>
inline T nd_iterator_init(T start) {
    return start;
}
template<typename T, typename U, typename W, typename... Args>
inline T nd_iterator_init(T start, U& x, const W& X, Args&& ... tuple) {
    start = nd_iterator_init(start, std::forward<Args>(tuple)...);
    x = start % X;
    return start / X;
}
/* Advance a coordinate tuple by one position in row-major order
 * (innermost dimension fastest). Returns true iff the tuple wrapped all
 * the way around to zero, i.e. the carry propagated past the outermost
 * dimension. */
inline bool nd_iterator_step() {
    return true;
}
template<typename U, typename W, typename... Args>
inline bool nd_iterator_step(U& x, const W& X, Args&& ... tuple) {
    if (nd_iterator_step(std::forward<Args>(tuple)...)) {
        x = (x + 1) % X;   // inner dimensions wrapped: increment this one
        return x == 0;     // propagate the carry outward when we wrap too
    }
    return false;
}
/* Parallel 2-D loop: splits the flat D0*D1 row-major iteration space
 * across the team with balance211 and calls f(d0, d1) for every point.
 * NOTE(review): this overload uses `#pragma omp parallel` unconditionally,
 * unlike the ANAKIN_THR-guarded variadic parallel_nd below — confirm it
 * is only compiled with the OpenMP backend. */
template <typename T0, typename T1, typename F>
inline void parallel_nd(const T0 D0, const T1 D1, F f) {
    const size_t work_amount = (size_t)D0 * D1;
    if (work_amount == 0) {
        return;
    }
#pragma omp parallel
    {
        const int ithr = anakin_get_thread_num();
        const int nthr = anakin_get_num_threads();
        size_t start{0}, end{0};
        balance211(work_amount, nthr, ithr, start, end);
        T0 d0{0};
        T1 d1{0};
        nd_iterator_init(start, d0, D0, d1, D1);
        for (size_t iwork = start; iwork < end; ++iwork) {
            f(d0, d1);
            nd_iterator_step(d0, D0, d1, D1);
        }
    }
}
/* Parallel 3-D loop: same scheme as the 2-D overload above — flatten
 * D0*D1*D2, balance211 the range, iterate with nd_iterator_*.
 * NOTE(review): the omp pragma is unconditional here too; confirm this
 * overload is only built with the OpenMP backend. */
template <typename T0, typename T1, typename T2, typename F>
inline void parallel_nd(const T0 D0, const T1 D1, const T2 D2, F f) {
    const size_t work_amount = (size_t)D0 * D1 * D2;
    if (work_amount == 0) {
        return;
    }
#pragma omp parallel
    {
        const int ithr = anakin_get_thread_num();
        const int nthr = anakin_get_num_threads();
        size_t start{0}, end{0};
        balance211(work_amount, nthr, ithr, start, end);
        T0 d0{0};
        T1 d1{0};
        T2 d2{0};
        nd_iterator_init(start, d0, D0, d1, D1, d2, D2);
        for (size_t iwork = start; iwork < end; ++iwork) {
            f(d0, d1, d2);
            nd_iterator_step(d0, D0, d1, D1, d2, D2);
        }
    }
}
/* Jump the coordinate tuple forward by up to (end - cur) flat positions.
 * Base case (innermost dimension): if the remaining distance to `end`
 * reaches the next dimension boundary, roll x over to 0, advance cur by
 * the consumed amount and return true so outer dimensions increment;
 * otherwise consume the whole remaining jump inside this dimension and
 * return false. */
template<typename U, typename W, typename Y>
inline bool nd_iterator_jump(U& cur, const U end, W& x, const Y& X) {
    U max_jump = end - cur;
    U dim_jump = X - x;
    if (dim_jump <= max_jump) {
        x = 0;
        cur += dim_jump;
        return true;
    } else {
        cur += max_jump;
        x += max_jump;
        return false;
    }
}
/* Recursive case: let the inner dimensions jump first; if they wrapped,
 * carry into this dimension. */
template<typename U, typename W, typename Y, typename... Args>
inline bool nd_iterator_jump(U& cur, const U end, W& x, const Y& X,
        Args&& ... tuple) {
    if (nd_iterator_jump(cur, end, std::forward<Args>(tuple)...)) {
        x = (x + 1) % X;
        return x == 0;
    }
    return false;
}
/* for_nd section */
/* Worker ithr's share of a 1-D loop over [0, D0): calls f(d0) for each
 * index in the balance211-assigned subrange. Runs on already-created
 * threads (no parallel region is opened here). */
template <typename T0, typename F>
void for_nd(const int ithr, const int nthr, const T0 &D0, F f) {
    T0 d0{0}, end{0};
    balance211(D0, nthr, ithr, d0, end);
    for (; d0 < end; ++d0) f(d0);
}
/* Worker ithr's share of a 2-D loop: flatten D0*D1 row-major, take the
 * balance211 subrange, and walk it with nd_iterator_init/step calling
 * f(d0, d1) for each point. */
template <typename T0, typename T1, typename F>
void for_nd(const int ithr, const int nthr, const T0 &D0, const T1 &D1, F f) {
    const size_t work_amount = (size_t)D0 * D1;
    if (work_amount == 0) return;
    size_t start{0}, end{0};
    balance211(work_amount, nthr, ithr, start, end);
    T0 d0{0}; T1 d1{0};
    nd_iterator_init(start, d0, D0, d1, D1);
    for (size_t iwork = start; iwork < end; ++iwork) {
        f(d0, d1);
        nd_iterator_step(d0, D0, d1, D1);
    }
}
/* 3- to 6-dimensional for_nd overloads: identical scheme to the 2-D
 * version above, just with more dimensions folded into the flat
 * iteration space. */
template <typename T0, typename T1, typename T2, typename F>
void for_nd(const int ithr, const int nthr, const T0 &D0, const T1 &D1,
        const T2 &D2, F f) {
    const size_t work_amount = (size_t)D0 * D1 * D2;
    if (work_amount == 0) return;
    size_t start{0}, end{0};
    balance211(work_amount, nthr, ithr, start, end);
    T0 d0{0}; T1 d1{0}; T2 d2{0};
    nd_iterator_init(start, d0, D0, d1, D1, d2, D2);
    for (size_t iwork = start; iwork < end; ++iwork) {
        f(d0, d1, d2);
        nd_iterator_step(d0, D0, d1, D1, d2, D2);
    }
}
/* 4-D variant. */
template <typename T0, typename T1, typename T2, typename T3, typename F>
void for_nd(const int ithr, const int nthr, const T0 &D0, const T1 &D1,
        const T2 &D2, const T3 &D3, F f) {
    const size_t work_amount = (size_t)D0 * D1 * D2 * D3;
    if (work_amount == 0) return;
    size_t start{0}, end{0};
    balance211(work_amount, nthr, ithr, start, end);
    T0 d0{0}; T1 d1{0}; T2 d2{0}; T3 d3{0};
    nd_iterator_init(start, d0, D0, d1, D1, d2, D2, d3, D3);
    for (size_t iwork = start; iwork < end; ++iwork) {
        f(d0, d1, d2, d3);
        nd_iterator_step(d0, D0, d1, D1, d2, D2, d3, D3);
    }
}
/* 5-D variant. */
template <typename T0, typename T1, typename T2, typename T3, typename T4,
        typename F>
void for_nd(const int ithr, const int nthr, const T0 &D0, const T1 &D1,
        const T2 &D2, const T3 &D3, const T4 &D4, F f) {
    const size_t work_amount = (size_t)D0 * D1 * D2 * D3 * D4;
    if (work_amount == 0) return;
    size_t start{0}, end{0};
    balance211(work_amount, nthr, ithr, start, end);
    T0 d0{0}; T1 d1{0}; T2 d2{0}; T3 d3{0}; T4 d4{0};
    nd_iterator_init(start, d0, D0, d1, D1, d2, D2, d3, D3, d4, D4);
    for (size_t iwork = start; iwork < end; ++iwork) {
        f(d0, d1, d2, d3, d4);
        nd_iterator_step(d0, D0, d1, D1, d2, D2, d3, D3, d4, D4);
    }
}
/* 6-D variant. */
template <typename T0, typename T1, typename T2, typename T3, typename T4,
        typename T5, typename F>
void for_nd(const int ithr, const int nthr, const T0 &D0, const T1 &D1,
        const T2 &D2, const T3 &D3, const T4 &D4, const T5 &D5, F f) {
    const size_t work_amount = (size_t)D0 * D1 * D2 * D3 * D4 * D5;
    if (work_amount == 0) return;
    size_t start{0}, end{0};
    balance211(work_amount, nthr, ithr, start, end);
    T0 d0{0}; T1 d1{0}; T2 d2{0}; T3 d3{0}; T4 d4{0}; T5 d5{0};
    nd_iterator_init(start, d0, D0, d1, D1, d2, D2, d3, D3, d4, D4, d5, D5);
    for (size_t iwork = start; iwork < end; ++iwork) {
        f(d0, d1, d2, d3, d4, d5);
        nd_iterator_step(d0, D0, d1, D1, d2, D2, d3, D3, d4, D4, d5, D5);
    }
}
/* parallel_nd and parallel_nd_in_omp section */
#if ANAKIN_THR != ANAKIN_THR_TBB
// Entry point for a parallel N-D loop: forwards the extents and the body
// functor to for_nd() with (thread id, thread count) supplied by the
// threading runtime selected at compile time.
template <typename ...Args>
void parallel_nd(Args &&...args) {
#if ANAKIN_THR == ANAKIN_THR_SEQ
    // Sequential build: the single "thread" 0 of 1 runs the whole space.
    for_nd(0, 1, std::forward<Args>(args)...);
#elif ANAKIN_THR == ANAKIN_THR_OMP
    // OpenMP build: open a parallel region; each thread runs its share.
#   pragma omp parallel
    for_nd(anakin_get_thread_num(), anakin_get_num_threads(),
            std::forward<Args>(args)...);
#endif
}
#else // ANAKIN_THR != ANAKIN_THR_TBB
// gcc 4.8 has a bug with passing parameter pack to lambdas.
// So have to explicitly instantiate all the cases.
// Each overload spawns anakin_get_max_threads() TBB tasks; every task
// calls for_nd() for its balanced share of the 1..6-D iteration space.
template <typename T0, typename F>
void parallel_nd(const T0 &D0, F f) {
    const int nthr = anakin_get_max_threads();
    tbb::parallel_for(0, nthr, [&](int ithr) {
        for_nd(ithr, nthr, D0, f);
    });
}
template <typename T0, typename T1, typename F>
void parallel_nd(const T0 &D0, const T1 &D1, F f) {
    const int nthr = anakin_get_max_threads();
    tbb::parallel_for(0, nthr, [&](int ithr) {
        for_nd(ithr, nthr, D0, D1, f);
    });
}
template <typename T0, typename T1, typename T2, typename F>
void parallel_nd(const T0 &D0, const T1 &D1, const T2 &D2, F f) {
    const int nthr = anakin_get_max_threads();
    tbb::parallel_for(0, nthr, [&](int ithr) {
        for_nd(ithr, nthr, D0, D1, D2, f);
    });
}
template <typename T0, typename T1, typename T2, typename T3, typename F>
void parallel_nd(const T0 &D0, const T1 &D1, const T2 &D2, const T3 &D3, F f) {
    const int nthr = anakin_get_max_threads();
    tbb::parallel_for(0, nthr, [&](int ithr) {
        for_nd(ithr, nthr, D0, D1, D2, D3, f);
    });
}
template <typename T0, typename T1, typename T2, typename T3, typename T4,
        typename F>
void parallel_nd(const T0 &D0, const T1 &D1, const T2 &D2, const T3 &D3,
        const T4 &D4, F f) {
    const int nthr = anakin_get_max_threads();
    tbb::parallel_for(0, nthr, [&](int ithr) {
        for_nd(ithr, nthr, D0, D1, D2, D3, D4, f);
    });
}
template <typename T0, typename T1, typename T2, typename T3, typename T4,
        typename T5, typename F>
void parallel_nd(const T0 &D0, const T1 &D1, const T2 &D2, const T3 &D3,
        const T4 &D4, const T5 &D5, F f) {
    const int nthr = anakin_get_max_threads();
    tbb::parallel_for(0, nthr, [&](int ithr) {
        for_nd(ithr, nthr, D0, D1, D2, D3, D4, D5, f);
    });
}
#endif
// Variant of parallel_nd() intended for use from INSIDE an already-open
// OpenMP parallel region: unlike parallel_nd(), the OMP branch does not
// open a new region, it only runs the calling thread's share.
// Under TBB this is unsupported and asserts.
template <typename ...Args>
void parallel_nd_in_omp(Args &&...args) {
#if ANAKIN_THR == ANAKIN_THR_SEQ
    for_nd(0, 1, std::forward<Args>(args)...);
#elif ANAKIN_THR == ANAKIN_THR_OMP
    for_nd(anakin_get_thread_num(), anakin_get_num_threads(),
            std::forward<Args>(args)...);
#elif ANAKIN_THR == ANAKIN_THR_TBB
    assert(!"unsupported parallel_nd_in_omp()");
#endif
}
} // namespace saber
} // namespace anakin
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute RESULT = X - Y for two `struct timeval` values.
 *
 * NOTE: Y is normalized in place as part of the carry handling, so the
 * caller's Y is modified.  Returns 1 if the difference is negative,
 * 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from the seconds field when X's microseconds are smaller. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Push any surplus microseconds into the seconds field so that the
   * resulting tv_usec is certainly positive. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* After normalization the sign is carried entirely by the seconds. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-1, 3-D 7-point variable-coefficient stencil.
 * Usage: prog Nx Ny Nz Nt  (interior sizes; a 1-cell halo is added).
 * Runs the stencil TESTS times and reports the per-run and minimum time.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* All four command-line arguments are required: previously, missing
   * arguments left Nx/Ny/Nz (argc <= 3) or Nt (argc <= 4) uninitialized,
   * which is undefined behavior when they are read below. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  /* allocate the two time buffers A[0]/A[1], each Nz x Ny x Nx */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* allocate the 7 variable-coefficient arrays */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 32;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Initialize BOTH time buffers over the FULL index range (including the
   * boundary planes at index 0): the stencil reads the halo of A[t%2] at
   * every step, and the halo of A[1] is never written by the time loop.
   * The previous loops started at index 1 and touched only A[0], so those
   * halo values were read uninitialized (undefined behavior). */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUG FIX: the macro defined at the top of this file is MIN; the
     * previous `min(...)` was an undeclared identifier. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return; /* timing status is currently unused */

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size); /* previously leaked */
  return 0;
}
|
resample.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS AAA M M PPPP L EEEEE %
% R R E SS A A MM MM P P L E %
% RRRR EEE SSS AAAAA M M M PPPP L EEE %
% R R E SS A A M M P L E %
% R R EEEEE SSSSS A A M M P LLLLL EEEEE %
% %
% %
% MagickCore Pixel Resampling Methods %
% %
% Software Design %
% John Cristy %
% Anthony Thyssen %
% August 2007 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/color-private.h"
#include "magick/cache.h"
#include "magick/draw.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/resample.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/transform.h"
#include "magick/signature-private.h"
#include "magick/utility.h"
/*
EWA Resampling Options
*/
/* select ONE resampling method */
#define EWA 1 /* Normal EWA handling - raw or clamped */
/* if 0 then use "High Quality EWA" */
#define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */
#define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */
/* output debugging information */
#define DEBUG_ELLIPSE 0 /* output ellipse info for debug */
#define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */
#define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */
#if ! FILTER_DIRECT
#define WLUT_WIDTH 1024 /* size of the filter cache */
#endif
/*
Typedef declarations.
*/
/*
  Per-image resampling state: created by AcquireResampleFilter() and reused
  across many ResamplePixelColor() lookups.
*/
struct _ResampleFilter
{
  CacheView
    *view;              /* cache view used for all virtual pixel lookups */

  Image
    *image;             /* reference taken via ReferenceImage() */

  ExceptionInfo
    *exception;         /* where lookup/resize errors are reported */

  MagickBooleanType
    debug;

  /* Information about image being resampled */
  ssize_t
    image_area;         /* image->columns * image->rows */

  InterpolatePixelMethod
    interpolate;        /* fallback point-interpolation method */

  VirtualPixelMethod
    virtual_pixel;      /* how out-of-bounds pixels are generated */

  FilterTypes
    filter;

  /* processing settings needed */
  MagickBooleanType
    limit_reached,      /* scaling too extreme: return an averaged color */
    do_interpolate,     /* skip EWA, use plain point interpolation */
    average_defined;    /* average_pixel below has been computed */

  MagickPixelPacket
    average_pixel;      /* cached average color of the whole image */

  /* current elliptical area being resampled around center point */
  double
    A, B, C,            /* ellipse coefficients: Q = A*U^2 + B*U*V + C*V^2 */
    Vlimit, Ulimit, Uwidth, slope;  /* parallelogram bound of the ellipse */

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double
    filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter
    *filter_def;

  double
    F;
#endif

  /* the practical working support of the filter */
  double
    support;

  size_t
    signature;          /* MagickSignature while the structure is valid */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResampleFilter() initializes the information resample needs to do a
% scaled lookup of a color from an image, using area sampling.
%
% The algorithm is based on a Elliptical Weighted Average, where the pixels
% found in a large elliptical area is averaged together according to a
% weighting (filter) function. For more details see "Fundamentals of Texture
% Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
% 1989. Available for free from, http://www.cs.cmu.edu/~ph/
%
% As EWA resampling (or any sort of resampling) can require a lot of
% calculations to produce a distorted scaling of the source image for each
% output pixel, the ResampleFilter structure generated holds that information
% between individual image resampling.
%
% This function will make the appropriate AcquireCacheView() calls
% to view the image, calling functions do not need to open a cache view.
%
% Usage Example...
% resample_filter=AcquireResampleFilter(image,exception);
% SetResampleFilter(resample_filter, GaussianFilter, 1.0);
% for (y=0; y < (ssize_t) image->rows; y++) {
% for (x=0; x < (ssize_t) image->columns; x++) {
% u= ....; v= ....;
% ScaleResampleFilter(resample_filter, ... scaling vectors ...);
% (void) ResamplePixelColor(resample_filter,u,v,&pixel);
% ... assign resampled pixel value ...
% }
% }
% DestroyResampleFilter(resample_filter);
%
% The format of the AcquireResampleFilter method is:
%
% ResampleFilter *AcquireResampleFilter(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
  ExceptionInfo *exception)
{
  register ResampleFilter
    *filter_info;

  /* Validate the inputs before touching them. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);

  /* Allocate and zero the resampling state. */
  filter_info=(ResampleFilter *) AcquireMagickMemory(sizeof(*filter_info));
  if (filter_info == (ResampleFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(filter_info,0,sizeof(*filter_info));

  /* Take a reference to the image and open a cache view onto it. */
  filter_info->exception=exception;
  filter_info->image=ReferenceImage((Image *) image);
  filter_info->view=AcquireCacheView(filter_info->image);

  filter_info->debug=IsEventLogging();
  filter_info->signature=MagickSignature;

  filter_info->image_area=(ssize_t) (image->columns*image->rows);
  filter_info->average_defined = MagickFalse;

  /* initialise the resampling filter settings from the image defaults */
  SetResampleFilter(filter_info, image->filter, image->blur);
  (void) SetResampleFilterInterpolateMethod(filter_info,
    image->interpolate);
  (void) SetResampleFilterVirtualPixelMethod(filter_info,
    GetImageVirtualPixelMethod(image));

  return(filter_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResampleFilter() finalizes and cleans up the resampling
% resample_filter as returned by AcquireResampleFilter(), freeing any memory
% or other information as needed.
%
% The format of the DestroyResampleFilter method is:
%
% ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
  ResampleFilter *resample_filter)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  /* Release resources in reverse order of acquisition: the cache view,
     then the image reference taken by AcquireResampleFilter(). */
  resample_filter->view=DestroyCacheView(resample_filter->view);
  resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
  /* Direct-filter mode also owns a ResizeFilter that must be freed. */
  resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
  /* Invalidate the signature so stale pointers trip the asserts above. */
  resample_filter->signature=(~MagickSignature);
  resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResamplePixelColor() samples the pixel values surrounding the location
% given using an elliptical weighted average, at the scale previously
% calculated, and in the most efficient manner possible for the
% VirtualPixelMethod setting.
%
% The format of the ResamplePixelColor method is:
%
% MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
% const double u0,const double v0,MagickPixelPacket *pixel)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o u0,v0: A double representing the center of the area to resample,
% The distortion transformed x,y coordinate.
%
% o pixel: the resampled pixel is returned here.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
  ResampleFilter *resample_filter,const double u0,const double v0,
  MagickPixelPacket *pixel)
{
  MagickBooleanType
    status;

  ssize_t u,v, v1, v2, uw, hit;
  double u1;
  double U,V,Q,DQ,DDQ;
  double divisor_c,divisor_m;
  register double weight;
  register const PixelPacket *pixels;
  register const IndexPacket *indexes;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);

  status=MagickTrue;
  /* GetMagickPixelPacket(resample_filter->image,pixel); */

  /* Fast path: area resampling not wanted -- delegate to plain point
     interpolation at (u0,v0) and return. */
  if ( resample_filter->do_interpolate ) {
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
    return(status);
  }

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif

  /*
    Does resample area Miss the image?
    And is that area a simple solid color - then return that color
  */
  hit = 0;
  switch ( resample_filter->virtual_pixel ) {
    case BackgroundVirtualPixelMethod:
    case ConstantVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case MaskVirtualPixelMethod:
      /* Solid-color virtual pixel: a miss anywhere outside the image, or
         a scaling-limit condition, yields a uniform color. */
      if ( resample_filter->limit_reached
           || u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns
           || v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows
           )
        hit++;
      break;

    case UndefinedVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
      /* Edge-extend: only the four outside corners are a solid color. */
      if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 + resample_filter->Ulimit < 0.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows )
           )
        hit++;
      break;
    case HorizontalTileVirtualPixelMethod:
      if ( v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows
           )
        hit++;  /* outside the horizontally tiled images. */
      break;
    case VerticalTileVirtualPixelMethod:
      if ( u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns
           )
        hit++;  /* outside the vertically tiled images. */
      break;
    case DitherVirtualPixelMethod:
      /* Dither allows a 32-pixel fuzz band around the image proper. */
      if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 + resample_filter->Ulimit < -32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+32.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+32.0 )
           )
        hit++;
      break;
    case TileVirtualPixelMethod:
    case MirrorVirtualPixelMethod:
    case RandomVirtualPixelMethod:
    case HorizontalTileEdgeVirtualPixelMethod:
    case VerticalTileEdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
      /* resampling of area is always needed - no VP limits */
      break;
  }
  if ( hit ) {
    /* whole area is a solid color -- just return that color */
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel,
      resample_filter->exception);
    return(status);
  }

  /*
    Scaling limits reached, return an 'averaged' result.
  */
  if ( resample_filter->limit_reached ) {
    switch ( resample_filter->virtual_pixel ) {
      /* This is always handled by the above, so no need.
        case BackgroundVirtualPixelMethod:
        case ConstantVirtualPixelMethod:
        case TransparentVirtualPixelMethod:
        case GrayVirtualPixelMethod,
        case WhiteVirtualPixelMethod
        case MaskVirtualPixelMethod:
      */
      case UndefinedVirtualPixelMethod:
      case EdgeVirtualPixelMethod:
      case DitherVirtualPixelMethod:
      case HorizontalTileEdgeVirtualPixelMethod:
      case VerticalTileEdgeVirtualPixelMethod:
        /* We need an average edge pixel, from the correct edge!
           How should I calculate an average edge color?
           Just returning an averaged neighbourhood,
           works well in general, but falls down for TileEdge methods.
           This needs to be done properly!!!!!!
        */
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
          resample_filter->exception);
        break;
      case HorizontalTileVirtualPixelMethod:
      case VerticalTileVirtualPixelMethod:
        /* just return the background pixel - Is there more direct way? */
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
          resample_filter->exception);
        break;
      case TileVirtualPixelMethod:
      case MirrorVirtualPixelMethod:
      case RandomVirtualPixelMethod:
      case CheckerTileVirtualPixelMethod:
      default:
        /* generate a average color of the WHOLE image */
        if ( resample_filter->average_defined == MagickFalse ) {
          Image
            *average_image;

          CacheView
            *average_view;

          GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *)
            &resample_filter->average_pixel);
          resample_filter->average_defined=MagickTrue;

          /* Try to get an averaged pixel color of whole image by resizing
             the image down to a single pixel with a box filter. */
          average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0,
            resample_filter->exception);
          if (average_image == (Image *) NULL)
            {
              *pixel=resample_filter->average_pixel; /* FAILED */
              break;
            }
          average_view=AcquireCacheView(average_image);
          pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1,
            resample_filter->exception);
          if (pixels == (const PixelPacket *) NULL) {
            average_view=DestroyCacheView(average_view);
            average_image=DestroyImage(average_image);
            *pixel=resample_filter->average_pixel; /* FAILED */
            break;
          }
          indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view);
          SetMagickPixelPacket(resample_filter->image,pixels,indexes,
            &(resample_filter->average_pixel));
          average_view=DestroyCacheView(average_view);
          average_image=DestroyImage(average_image);

          if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
            {
              /* CheckerTile is the average of the image average and the
                 background color, weighted by their alpha coverage. */
              /* FUTURE: replace with a 50% blend of both pixels */
              weight = QuantumScale*((MagickRealType)(QuantumRange-
                resample_filter->average_pixel.opacity));
              resample_filter->average_pixel.red *= weight;
              resample_filter->average_pixel.green *= weight;
              resample_filter->average_pixel.blue *= weight;
              divisor_c = weight;

              weight = QuantumScale*((MagickRealType)(QuantumRange-
                resample_filter->image->background_color.opacity));
              resample_filter->average_pixel.red +=
                weight*resample_filter->image->background_color.red;
              resample_filter->average_pixel.green +=
                weight*resample_filter->image->background_color.green;
              resample_filter->average_pixel.blue +=
                weight*resample_filter->image->background_color.blue;
              resample_filter->average_pixel.opacity +=
                resample_filter->image->background_color.opacity;
              divisor_c += weight;

              resample_filter->average_pixel.red /= divisor_c;
              resample_filter->average_pixel.green /= divisor_c;
              resample_filter->average_pixel.blue /= divisor_c;
              resample_filter->average_pixel.opacity /= 2;
            }
        }
        *pixel=resample_filter->average_pixel;
        break;
    }
    return(status);
  }

  /*
    Initialize weighted average data collection
  */
  hit = 0;
  divisor_c = 0.0;
  divisor_m = 0.0;
  pixel->red = pixel->green = pixel->blue = 0.0;
  if (pixel->matte != MagickFalse) pixel->opacity = 0.0;
  if (pixel->colorspace == CMYKColorspace) pixel->index = 0.0;

  /*
    Determine the parallelogram bounding box fitted to the ellipse
    centered at u0,v0.  This area is bounded by the lines...
  */
  v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit);  /* range of scan lines */
  v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);

  /* scan line start and width across the parallelogram */
  u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
  uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
  (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif

  /*
    Do weighted resampling of all pixels, within the scaled ellipse,
    bound by a Parallelogram fitted to the ellipse.  Q is evaluated
    incrementally per pixel via its first (DQ) and second (DDQ)
    finite differences, avoiding a full quadratic per sample.
  */
  DDQ = 2*resample_filter->A;
  for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
    long uu = ceil(u1);   /* actual pixel location (for debug only) */
    (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
    u = (ssize_t)ceil(u1);        /* first pixel in scanline */
    u1 += resample_filter->slope; /* start of next scan line */

    /* location of this first pixel, relative to u0,v0 */
    U = (double)u-u0;
    V = (double)v-v0;

    /* Q = ellipse quotient ( if Q<F then pixel is inside ellipse) */
    Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
    DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;

    /* get the scanline of pixels for this v */
    pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
      1,resample_filter->exception);
    if (pixels == (const PixelPacket *) NULL)
      return(MagickFalse);
    indexes=GetCacheViewVirtualIndexQueue(resample_filter->view);

    /* count up the weighted pixel colors */
    for( u=0; u<uw; u++ ) {
#if FILTER_LUT
      /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
      if ( Q < (double)WLUT_WIDTH ) {
        weight = resample_filter->filter_lut[(int)Q];
#else
      /* Note that the ellipse has been pre-scaled so F = support^2 */
      if ( Q < (double)resample_filter->F ) {
        weight = GetResizeFilterWeight(resample_filter->filter_def,
          sqrt(Q)); /* a SquareRoot! Arrggghhhhh... */
#endif

        /* Opacity is accumulated with the raw weight; the color channels
           use an alpha-premultiplied weight and their own divisor. */
        pixel->opacity += weight*pixels->opacity;
        divisor_m += weight;

        if (pixel->matte != MagickFalse)
          weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity));
        pixel->red += weight*pixels->red;
        pixel->green += weight*pixels->green;
        pixel->blue += weight*pixels->blue;
        if (pixel->colorspace == CMYKColorspace)
          pixel->index += weight*(*indexes);
        divisor_c += weight;

        hit++;

#if DEBUG_HIT_MISS
        /* mark the pixel according to hit/miss of the ellipse */
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
            (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
            (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      } else {
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
            (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
            (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      }
      uu++;
#else
      }
#endif
      pixels++;
      indexes++;
      Q += DQ;
      DQ += DDQ;
    }
  }
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif

  /*
    Result sanity check -- this should NOT happen
  */
  if ( hit == 0 ) {
    /* not enough pixels in resampling, resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
    pixel->opacity = pixel->red = pixel->green = pixel->blue = 0;
    pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
#endif
    return status;
  }

  /*
    Finalize results of resampling
  */
  divisor_m = 1.0/divisor_m;
  pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity);
  divisor_c = 1.0/divisor_c;
  pixel->red = (MagickRealType) ClampToQuantum(divisor_c*pixel->red);
  pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green);
  pixel->blue = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue);
  if (pixel->colorspace == CMYKColorspace)
    pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index);
  return(MagickTrue);
}
#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
- C l a m p U p A x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampUpAxes() function converts the input vectors into a major and
% minor axis unit vectors, and their magnitude. This allows us to
% ensure that the ellipse generated is never smaller than the unit
% circle and thus never too small for use in EWA resampling.
%
% This purely mathematical 'magic' was provided by Professor Nicolas
% Robidoux and his Masters student Chantal Racette.
%
% Reference: "We Recommend Singular Value Decomposition", David Austin
% http://www.ams.org/samplings/feature-column/fcarc-svd
%
% By generating major and minor axis vectors, we can actually use the
% ellipse in its "canonical form", by remapping the dx,dy of the
% sampled point into distances along the major and minor axis unit
% vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
const double dvx,
const double duy,
const double dvy,
double *major_mag,
double *minor_mag,
double *major_unit_x,
double *major_unit_y,
double *minor_unit_x,
double *minor_unit_y)
{
/*
* ClampUpAxes takes an input 2x2 matrix
*
* [ a b ] = [ dux duy ]
* [ c d ] = [ dvx dvy ]
*
* and computes from it the major and minor axis vectors [major_x,
* major_y] and [minor_x,minor_y] of the smallest ellipse containing
* both the unit disk and the ellipse which is the image of the unit
* disk by the linear transformation
*
* [ dux duy ] [S] = [s]
* [ dvx dvy ] [T] = [t]
*
* (The vector [S,T] is the difference between a position in output
* space and [X,Y]; the vector [s,t] is the difference between a
* position in input space and [x,y].)
*/
/*
* Output:
*
* major_mag is the half-length of the major axis of the "new"
* ellipse.
*
* minor_mag is the half-length of the minor axis of the "new"
* ellipse.
*
* major_unit_x is the x-coordinate of the major axis direction vector
* of both the "old" and "new" ellipses.
*
* major_unit_y is the y-coordinate of the major axis direction vector.
*
* minor_unit_x is the x-coordinate of the minor axis direction vector.
*
* minor_unit_y is the y-coordinate of the minor axis direction vector.
*
* Unit vectors are useful for computing projections, in particular,
* to compute the distance between a point in output space and the
* center of a unit disk in output space, using the position of the
* corresponding point [s,t] in input space. Following the clamping,
* the square of this distance is
*
* ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2
* +
* ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2
*
* If such distances will be computed for many [s,t]'s, it makes
* sense to actually compute the reciprocal of major_mag and
* minor_mag and multiply them by the above unit lengths.
*
* Now, if you want to modify the input pair of tangent vectors so
* that it defines the modified ellipse, all you have to do is set
*
* newdux = major_mag * major_unit_x
* newdvx = major_mag * major_unit_y
* newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y
* newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x
*
* and use these tangent vectors as if they were the original ones.
* Usually, this is a drastic change in the tangent vectors even if
* the singular values are not clamped; for example, the minor axis
* vector always points in a direction which is 90 degrees
* counterclockwise from the direction of the major axis vector.
*/
/*
* Discussion:
*
* GOAL: Fix things so that the pullback, in input space, of a disk
* of radius r in output space is an ellipse which contains, at
* least, a disc of radius r. (Make this hold for any r>0.)
*
* ESSENCE OF THE METHOD: Compute the product of the first two
* factors of an SVD of the linear transformation defining the
* ellipse and make sure that both its columns have norm at least 1.
* Because rotations and reflexions map disks to themselves, it is
* not necessary to compute the third (rightmost) factor of the SVD.
*
* DETAILS: Find the singular values and (unit) left singular
* vectors of Jinv, clampling up the singular values to 1, and
* multiply the unit left singular vectors by the new singular
* values in order to get the minor and major ellipse axis vectors.
*
* Image resampling context:
*
* The Jacobian matrix of the transformation at the output point
* under consideration is defined as follows:
*
* Consider the transformation (x,y) -> (X,Y) from input locations
* to output locations. (Anthony Thyssen, elsewhere in resample.c,
* uses the notation (u,v) -> (x,y).)
*
* The Jacobian matrix of the transformation at (x,y) is equal to
*
* J = [ A, B ] = [ dX/dx, dX/dy ]
* [ C, D ] [ dY/dx, dY/dy ]
*
* that is, the vector [A,C] is the tangent vector corresponding to
* input changes in the horizontal direction, and the vector [B,D]
* is the tangent vector corresponding to input changes in the
* vertical direction.
*
* In the context of resampling, it is natural to use the inverse
* Jacobian matrix Jinv because resampling is generally performed by
* pulling pixel locations in the output image back to locations in
* the input image. Jinv is
*
* Jinv = [ a, b ] = [ dx/dX, dx/dY ]
* [ c, d ] [ dy/dX, dy/dY ]
*
* Note: Jinv can be computed from J with the following matrix
* formula:
*
* Jinv = 1/(A*D-B*C) [ D, -B ]
* [ -C, A ]
*
* What we do is modify Jinv so that it generates an ellipse which
* is as close as possible to the original but which contains the
* unit disk. This can be accomplished as follows:
*
* Let
*
* Jinv = U Sigma V^T
*
* be an SVD decomposition of Jinv. (The SVD is not unique, but the
* final ellipse does not depend on the particular SVD.)
*
* We could clamp up the entries of the diagonal matrix Sigma so
* that they are at least 1, and then set
*
* Jinv = U newSigma V^T.
*
* However, we do not need to compute V for the following reason:
* V^T is an orthogonal matrix (that is, it represents a combination
* of rotations and reflexions) so that it maps the unit circle to
* itself. For this reason, the exact value of V does not affect the
* final ellipse, and we can choose V to be the identity
* matrix. This gives
*
* Jinv = U newSigma.
*
* In the end, we return the two diagonal entries of newSigma
* together with the two columns of U.
*/
/*
* ClampUpAxes was written by Nicolas Robidoux and Chantal Racette
* of Laurentian University with insightful suggestions from Anthony
* Thyssen and funding from the National Science and Engineering
* Research Council of Canada. It is distinguished from its
* predecessors by its efficient handling of degenerate cases.
*
* The idea of clamping up the EWA ellipse's major and minor axes so
* that the result contains the reconstruction kernel filter support
* is taken from Andreas Gustaffson's Masters thesis "Interactive
* Image Warping", Helsinki University of Technology, Faculty of
* Information Technology, 59 pages, 1993 (see Section 3.6).
*
* The use of the SVD to clamp up the singular values of the
* Jacobian matrix of the pullback transformation for EWA resampling
* is taken from the astrophysicist Craig DeForest. It is
* implemented in his PDL::Transform code (PDL = Perl Data
* Language).
*/
const double a = dux;
const double b = duy;
const double c = dvx;
const double d = dvy;
/*
* n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the
* squares of the singular values of Jinv.
*/
const double aa = a*a;
const double bb = b*b;
const double cc = c*c;
const double dd = d*d;
/*
* Eigenvectors of n are left singular vectors of Jinv.
*/
const double n11 = aa+bb;
const double n12 = a*c+b*d;
const double n21 = n12;
const double n22 = cc+dd;
const double det = a*d-b*c;
const double twice_det = det+det;
const double frobenius_squared = n11+n22;
const double discriminant =
(frobenius_squared+twice_det)*(frobenius_squared-twice_det);
const double sqrt_discriminant = sqrt(discriminant);
/*
* s1 is the largest singular value of the inverse Jacobian
* matrix. In other words, its reciprocal is the smallest singular
* value of the Jacobian matrix itself.
* If s1 = 0, both singular values are 0, and any orthogonal pair of
* left and right factors produces a singular decomposition of Jinv.
*/
/*
* Initially, we only compute the squares of the singular values.
*/
const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
/*
* s2 the smallest singular value of the inverse Jacobian
* matrix. Its reciprocal is the largest singular value of the
* Jacobian matrix itself.
*/
const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
const double s1s1minusn11 = s1s1-n11;
const double s1s1minusn22 = s1s1-n22;
/*
* u1, the first column of the U factor of a singular decomposition
* of Jinv, is a (non-normalized) left singular vector corresponding
* to s1. It has entries u11 and u21. We compute u1 from the fact
* that it is an eigenvector of n corresponding to the eigenvalue
* s1^2.
*/
const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
/*
* The following selects the largest row of n-s1^2 I as the one
* which is used to find the eigenvector. If both s1^2-n11 and
* s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case,
* any vector is an eigenvector; in addition, norm below is equal to
* zero, and, in exact arithmetic, this is the only case in which
* norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
* if norm = 0 safely takes care of all cases.
*/
const double temp_u11 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
const double temp_u21 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
/*
* Finalize the entries of first left singular vector (associated
* with the largest singular value).
*/
const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
/*
* Clamp the singular values up to 1.
*/
*major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
*minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
/*
* Return the unit major and minor axis direction vectors.
*/
*major_unit_x = u11;
*major_unit_y = u21;
*minor_unit_x = -u21;
*minor_unit_y = u11;
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleResampleFilter() does all the calculations needed to resample an image
% at a specific scale, defined by two scaling vectors. This not using
% a orthogonal scaling, but two distorted scaling vectors, to allow the
% generation of a angled ellipse.
%
% As only two deritive scaling vectors are used the center of the ellipse
% must be the center of the lookup. That is any curvature that the
% distortion may produce is discounted.
%
% The input vectors are produced by either finding the derivitives of the
% distortion function, or the partial derivitives from a distortion mapping.
% They do not need to be the orthogonal dx,dy scaling vectors, but can be
% calculated from other derivatives. For example you could use dr,da/r
% polar coordinate vector scaling vectors
%
% If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y)
% Then the scaling vectors are determined from the deritives...
% du/dx, dv/dx and du/dy, dv/dy
% If the resulting scaling vectors is othogonally aligned then...
% dv/dx = 0 and du/dy = 0
% Producing an othogonally alligned ellipse in source space for the area to
% be resampled.
%
% Note that the scaling vectors differ from the argument order. Argument order
% is the general order in which the derivatives are extracted from the
% distortion equations, and not the scaling vectors. As such the middle two
% values may be swapped from what you expect. Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
% void ScaleResampleFilter(const ResampleFilter *resample_filter,
% const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
% o resample_filter: the resampling resample_filterrmation defining the
% image being resampled
%
% o dux,duy,dvx,dvy:
% The deritives or scaling vectors defining the EWA ellipse.
% NOTE: watch the order, which is based on the order deritives
% are usally determined from distortion equations (see above).
% The middle two values may need to be swapped if you are thinking
% in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  double A,B,C,F;  /* quadratic form of the ellipse: A*u^2 + B*u*v + C*v^2 = F */

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);
  /* Assume resampling is possible until a limit check below proves otherwise */
  resample_filter->limit_reached = MagickFalse;

  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
    dux, dvx, duy, dvy);
#endif

  /* Find Ellipse Coefficients such that
        A*u^2 + B*u*v + C*v^2 = F
     With u,v relative to point around which we are resampling.
     And the given scaling dx,dy vectors in u,v space
        du/dx,dv/dx and du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients
     However when magnifying images, the scaling vectors will be small
     resulting in a ellipse that is too small to sample properly.
     As such we need to clamp the major/minor axis to a minimum of 1.0
     to prevent it getting too small.
  */
#if EWA_CLAMP
  { double major_mag,
      minor_mag,
      major_x,
      major_y,
      minor_x,
      minor_y;

  /* Axis vectors of the sampling ellipse, clamped so that neither
     half-axis is shorter than 1.0 (see ClampUpAxes above). */
  ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
    &major_x, &major_y, &minor_x, &minor_y);
  major_x *= major_mag; major_y *= major_mag;
  minor_x *= minor_mag; minor_y *= minor_mag;
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
    major_x, major_y, minor_x, minor_y);
#endif
  A = major_y*major_y+minor_y*minor_y;
  B = -2.0*(major_x*major_y+minor_x*minor_y);
  C = major_x*major_x+minor_x*minor_x;
  /* F is the squared product of the (clamped) half-axis lengths */
  F = major_mag*minor_mag;
  F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;  /* determinant of the derivative matrix */
  F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
  /*
    This Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his
    thesis, which adds a unit circle to the elliptical area so as to do both
    Reconstruction and Prefiltering of the pixels in the resampling. It also
    means it is always likely to have at least 4 pixels within the area of the
    ellipse, for weighted averaging. No scaling will result with F == 4.0 and
    a circle of radius 2.0, and F smaller than this means magnification is
    being used.

    NOTE: This method produces a very blurry result at near unity scale while
    producing perfect results for strong minification and magnification.

    However filter support is fixed to 2.0 (no good for Windowed Sinc filters)
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);

  /* Figure out the various information directly about the ellipse.
     This information currently not needed at this time, but may be
     needed later for better limit determination.

     It is also good to have as a record for future debugging
  */
  { double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;

    alpha = A+C;
    beta  = A-C;
    gamma = sqrt(beta*beta + B*B );

    if ( alpha - gamma <= MagickEpsilon )
      Major = MagickHuge;
    else
      Major = sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));

    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );

    /* other information about ellipse include... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);

    (void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
      RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif

  /* If one or both of the scaling vectors is impossibly large
     (producing a very large raw F value), we may as well not bother
     doing any form of resampling since resampled area is very large.
     In this case some alternative means of pixel sampling, such as
     the average of the whole image is needed to get a reasonable
     result. Calculate only as needed.
  */
  if ( (4*A*C - B*B) > MagickHuge ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse to match the filters support
     (that is, multiply F by the square of the support).
  */
  F *= resample_filter->support;
  F *= resample_filter->support;

  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));

  /* Horizontally aligned parallelogram fitted to Ellipse */
  resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
    resample_filter->Ulimit, resample_filter->Vlimit,
    resample_filter->Uwidth, resample_filter->slope );
#endif

  /* Check the absolute area of the parallelogram involved.
   * This limit needs more work, as it is too slow for larger images
   * with tiled views of the horizon.
  */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
       > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse formula to directly index the Filter Lookup Table */
  { register double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale = (double)WLUT_WIDTH/F;
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale = resample_filter->F/F;
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilter() sets the resampling filter lookup table based on a
% specific filter. Note that the filter is used as a radial filter, not as a
% two-pass orthogonally aligned resampling filter.
%
% The default filter is Gaussian, which is the standard filter used by the
% original paper on the Elliptical Weighted Average Algorithm. However other
% filters can also be used.
%
% The format of the SetResampleFilter method is:
%
% void SetResampleFilter(ResampleFilter *resample_filter,
% const FilterTypes filter,const double blur)
%
% A description of each parameter follows:
%
% o resample_filter: resampling resample_filterrmation structure
%
% o filter: the resize filter for elliptical weighting LUT
%
% o blur: filter blur factor (radial scaling) for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterTypes filter,const double blur)
{
  ResizeFilter
    *resize_filter;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);

  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;

  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( filter == PointFilter )
    {
      resample_filter->do_interpolate = MagickTrue;
      return; /* EWA turned off - nothing more to do */
    }

  /* Set a default cylindrical filter of a 'low blur' Jinc windowed Jinc */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;

  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,blur,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL)
    {
      (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
        ModuleError, "UnableToSetFilteringValue",
        "Fall back to default EWA gaussian filter");
      /* FIX: the previous code fell through here and later passed the NULL
         resize_filter to GetResizeFilterSupport()/GetResizeFilterWeight(),
         dereferencing a null pointer.  Instead, fall all the way back to
         direct interpolation (the PointFilter path above) and bail out. */
      resample_filter->filter = PointFilter;
      resample_filter->do_interpolate = MagickTrue;
      return;
    }

  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
   */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif

#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function */
  { register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
        GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);

    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif

  /*
    Adjust the scaling of the default unit circle
    This assumes that any real scaling changes will always
    take place AFTER the filter method has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);

#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): this 'omp single' is orphaned unless the caller is inside
     a parallel region -- confirm that is the intended usage. */
  #pragma omp single
#endif
  { register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    if (IsMagickTrue(GetImageArtifact(resample_filter->image,"resample:verbose")) )
      {
        /* Debug output of the filter weighting LUT
          Gnuplot the LUT with horizontal adjusted to 'r' using...
             plot [0:2][-.2:1] "lut.dat" using (sqrt($0/1024)*2):1 with lines
          The filter values are normalized for comparison
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values)\n", WLUT_WIDTH);
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# And the whole table represents the filters support.\n");
        printf("\n"); /* generates a 'break' in gnuplot if multiple outputs */
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
            GetMagickPrecision(),sqrt((double)Q)*r_scale,
            GetMagickPrecision(),resample_filter->filter_lut[Q] );
      }
    /* output the above once only for each image, and each setting */
    (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
  }
#endif /* FILTER_LUT */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterInterpolateMethod() sets the resample filter interpolation
% method.
%
% The format of the SetResampleFilterInterpolateMethod method is:
%
%      MagickBooleanType SetResampleFilterInterpolateMethod(
%        ResampleFilter *resample_filter,const InterpolatePixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
  ResampleFilter *resample_filter,const InterpolatePixelMethod method)
{
  /*
    Record the pixel interpolation method used whenever EWA area
    resampling is turned off (PointFilter) or cannot be applied.
  */
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    {
      (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
        resample_filter->image->filename);
    }
  resample_filter->interpolate=method;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
% associated with the specified resample filter.
%
% The format of the SetResampleFilterVirtualPixelMethod method is:
%
% MagickBooleanType SetResampleFilterVirtualPixelMethod(
% ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
  ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
  /*
    Record the virtual pixel (edge handling) method for this resample
    filter, and push it down to the underlying cache view unless it is
    left undefined.
  */
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    {
      (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
        resample_filter->image->filename);
    }
  resample_filter->virtual_pixel=method;
  if (method != UndefinedVirtualPixelMethod)
    {
      (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
    }
  return(MagickTrue);
}
|
mandelbrot.c | /* vim: set ft=c sw=4 ts=4: */
/* mandelbrot.c
* Mandelbrot set calculation in C */
#include "mandelbrot.h"
#include <complex.h>
#ifdef _MSC_VER
typedef _Dcomplex dcplx;
#else
typedef double complex dcplx;
#endif
#define R_MAX 2.0
#define N_MAX 255
#ifdef _MSC_VER
unsigned calc_value(dcplx const z0) {
dcplx z = _Cbuild(0.0, 0.0);
for (unsigned n = N_MAX; n > 0; --n) {
z = _Cbuild(creal(z) * creal(z) - cimag(z) * cimag(z) + creal(z0),
2 * creal(z) * cimag(z) + cimag(z0));
if (cabs(z) > R_MAX)
return n;
}
return 0;
}
#else
unsigned calc_value(dcplx const z0) {
dcplx z = 0 + 0 * I;
for (unsigned n = N_MAX; n > 0; --n) {
z = z * z + z0;
if (cabs(z) > R_MAX)
return n;
}
return 0;
}
#endif
/* Map an escape-time value onto an RGB triple.  Each channel uses its own
 * modulus/multiplier pair so nearby values land on distinct colours. */
void to_rgb(unsigned const val, char rgb[3]) {
    static unsigned const mod[3] = { 15, 7, 31 };
    static unsigned const mul[3] = { 5, 32, 8 };
    for (int ch = 0; ch < 3; ++ch)
        rgb[ch] = (char)(mul[ch] * (val % mod[ch]));
}
/* Render a width x height RGB image of the Mandelbrot set centred on
 * (x_center, y_center), with pixel_size units per pixel.
 * Returns a newly created image (owned by the caller) or 0 on allocation
 * failure. */
struct image *create(size_t const width, size_t const height,
                     double const x_center, double const y_center,
                     double const pixel_size) {
    struct image *img = img_create(width, height, 3);
    if (!img)
        return 0;
    /* Complex coordinate of pixel (0,0): half the image extent to the left
     * of and above the centre. */
#ifdef _MSC_VER
    const dcplx offset = _Cbuild(x_center - 0.5 * pixel_size * (double)width,
                                 y_center + 0.5 * pixel_size * (double)height);
#else
    const dcplx offset = x_center - 0.5 * pixel_size * (double)width +
                         I * (y_center + 0.5 * pixel_size * (double)height);
#endif
    size_t y;
#pragma omp parallel for private(y)
    for (y = 0; y < height; ++y) {
        for (size_t x = 0; x < width; ++x) {
            /* FIX: rgb must be declared inside the parallel loop.  It was
             * previously a single function-scope buffer shared by every
             * OpenMP thread -- a data race that could corrupt pixels. */
            char rgb[3];
#ifdef _MSC_VER
            dcplx z = _Cbuild((double)x * pixel_size + creal(offset),
                              -(double)y * pixel_size + cimag(offset));
#else
            dcplx z = x * pixel_size - y * pixel_size * I + offset;
#endif
            to_rgb(calc_value(z), rgb);
            img_set_pixel(img, x, y, rgb);
        }
    }
    return img;
}
|
rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file rnn_impl.h
* \brief
* \author Shu Zhang
*/
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <random>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"
namespace mxnet {
namespace op {
// Logistic sigmoid activation used by the LSTM gates: 1 / (1 + e^(-x)).
// The 1.0f literals promote to the type of exp(-x), so the arithmetic is
// carried out at (at least) double precision before conversion to DType.
template<typename DType>
inline DType sigmoid(DType x) {
  return 1.0f / (1.0f + exp(-x));
}
// Rectified linear unit: max(x, 0), computed entirely in DType.
//
// Fix: the previous implementation returned `static_cast<float>(x)` from a
// float-typed conditional expression, which silently rounded the result
// through single precision whenever DType is double.
template<typename DType>
inline DType relu(DType x) {
  return x > static_cast<DType>(0) ? x : static_cast<DType>(0);
}
// Training-mode forward pass of one direction of a single LSTM layer.
//
//   ws  - scratch workspace: the input projection x*Wx^T (T*N*4H values),
//         followed by the per-step recurrent projection h*Wh^T (N*4H values)
//         and the running hidden state h (N*H values).
//   rs  - reserved space: the per-step cell states `c` (T*N*H) and gate
//         activations `ifgo` (T*N*H*4) are stored here.  NOTE(review): the
//         "reserve" comment below suggests these are re-read by the backward
//         pass -- confirm against the backward implementation.
//   state_outputs - when true, the final step's h and c are also written to
//         hy_ptr and cy_ptr (N*H values each).
//   bid - true for the reverse direction of a bidirectional layer: time is
//         walked backwards (t = T-1-i), output columns are offset by H, and
//         c/ifgo live in the second half of the layer's reserved space.
//   T, N, I, H - sequence length, batch size, input size, hidden size.
//   x   - layer input, shape (T*N, I).
//   hx, cx - initial hidden and cell states, shape (N, H) each.
//   y   - layer output, shape (T, N, H*D); this call fills H of its columns.
//   w_ptr - packed weights: Wx (4H, I) followed by Wh (4H, H).
//   b_ptr - packed biases: bx (4, H) followed by bh (4, H).
template<typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
                                    DType* rs,
                                    bool state_outputs,
                                    bool bid,
                                    const index_t T,
                                    const index_t N,
                                    const index_t I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    const Tensor<cpu, 2, DType> &cx,
                                    const Tensor<cpu, 3, DType> &y,
                                    DType* w_ptr,
                                    DType* b_ptr,
                                    DType* hy_ptr,
                                    DType* cy_ptr) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  // yx: projection of the whole input sequence; yh: projection of the
  // previous hidden state, recomputed each time step.
  const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
  const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  // The reverse direction stores its c/ifgo after the forward direction's
  // region of the reserved space.
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  const int offset = bid ? H : 0;
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const index_t cell_size = N * H;
  // One big GEMM for the input projection of every time step at once.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (index_t i = 0; i < T; ++i) {
    // t is the actual time index; i is the traversal step.  They differ
    // only for the reverse (bid) direction.  Note c/ifgo are indexed by i
    // (traversal order) while yx/y are indexed by t (time order).
    index_t t = bid ? T - 1 - i : i;
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t jk = 0; jk < cell_size; ++jk) {
      index_t j = jk / H;  // batch index
      index_t k = jk % H;  // hidden unit index
      // Standard LSTM cell: input, forget, candidate and output gates.
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      h[j][k] = ht;
      // reserve
      y[t][j][k + offset] = ht;
      c[i][j][k] = ct;
      ifgo[i][j][k][0] = it;
      ifgo[i][j][k][1] = ft;
      ifgo[i][j][k][2] = gt;
      ifgo[i][j][k][3] = ot;
      // Export the final step's states when the caller asked for them.
      if (i == T - 1 && state_outputs) {
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      }
    }
  }
}
// Training-mode forward pass of a full LSTM stack.
//
//   L, D       - number of layers and directions (D is 1 or 2).
//   T, N, I, H - sequence length, batch size, input size, hidden size.
//   ws         - scratch workspace shared with the single-layer kernel.
//   rs         - reserved space: the inter-layer dropout mask first
//                ((L-1)*D*T*N*H values), then each layer's reserve chunk of
//                r_size values whose tail (at y_offset) is that layer's
//                output sequence.
//   x_ptr      - input, (T*N, I); y_ptr - output, (T, N, H*D).
//   hx_ptr/cx_ptr - initial states, (L*D, N, H).
//   w_ptr/b_ptr   - packed per-layer/direction weights and biases.
//   hy_ptr/cy_ptr - final states, advanced one (N, H) slab per
//                   layer/direction when state_outputs is true.
//   dropout    - inter-layer dropout probability; inverted dropout is used
//                (kept activations are scaled by 1/(1-p) at train time).
//   rnd_engine - RNG used to sample the dropout mask.
template <typename DType>
void LstmForwardTraining(DType* ws,
                         DType* rs,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const index_t T,
                         const index_t N,
                         const index_t I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* cx_ptr,
                         DType* w_ptr,
                         DType* b_ptr,
                         DType* y_ptr,
                         DType* hy_ptr,
                         DType* cy_ptr,
                         const float dropout,
                         std::mt19937 &rnd_engine) {  // NOLINT(runtime/references)
  // rs2 points at the current layer's reserve chunk, after the dropout mask.
  DType* dropout_random = rs;
  DType* rs2 = dropout_random + (L - 1) * D * T * N * H;
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const index_t b_size = 2 * H * 4;
  const index_t r_size = D * T * N * H * 6;
  const index_t y_offset = T * N * H * 5;
  const index_t cell_size = N * H;
  int idx = 0;  // state & cell state's idx;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < L; ++i) {
    // Layers above the first consume the previous layer's (T, N, H*D) output.
    const index_t input_size = i ? H * D : I;
    const index_t w_size = (input_size + H) * H * 4;
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(rs2 + y_offset, Shape3(T, N, H * D));
    // Forward direction of this layer.
    LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, false, T, N, input_size, H, x,
                                          hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    if (D == 2) {
      // Reverse direction: advance to the next weight/bias/state slabs.
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, true, T, N, input_size, H, x,
                                            hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      // Inter-layer (inverted) dropout; the mask is reserved so the
      // backward pass can replay the same scaling.
      if (dropout > 0.0f) {
        std::uniform_real_distribution<float> distribution(0, 1);
        for (index_t j = 0; j < T * N * H * D; j++) {
          if (distribution(rnd_engine) < dropout) {
            dropout_random[i * T * N * H * D + j] = 0;
            y.dptr_[j] = 0;
          } else {
            dropout_random[i * T * N * H * D + j] = 1.0f - dropout;
            y.dptr_[j] = y.dptr_[j] / (1.0f - dropout);
          }
        }
      }
      // This layer's output becomes the next layer's input.
      x_ptr = y.dptr_;
      rs2 += r_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
  // Copy the last layer's output sequence (rs2 still points at it) into
  // the caller's output buffer.
#pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = (rs2 + y_offset)[i];
  }
}
// One layer, one direction of LSTM inference over the full sequence, with
// optional projection (P > 0 selects an LSTMP cell whose hidden state is
// projected from H down to P before being recurred and emitted).
// Weight layout in w_ptr: wx [4H, I], wh [4H, P ? P : H], then (P > 0 only)
// the projection matrix whr [P, H].  Gate order on the 4H axis is i, f, g, o.
template<typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
                                     bool state_outputs,
                                     bool bid,
                                     const index_t T,
                                     const index_t N,
                                     const index_t I,
                                     const int H,
                                     const int P,
                                     const Tensor<cpu, 2, DType> &x,
                                     const Tensor<cpu, 2, DType> &hx,
                                     const Tensor<cpu, 2, DType> &cx,
                                     const Tensor<cpu, 3, DType> &y,
                                     DType* w_ptr,
                                     DType* b_ptr,
                                     DType* hy_ptr,
                                     DType* cy_ptr) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, (P ? P : H)));
  // Placeholder 1x1 view; rebound to the real projection weights when P > 0.
  Tensor<cpu, 2, DType> whr(w_ptr, Shape2(1, 1));
  if (P > 0) whr = Tensor<cpu, 2, DType>(wh.dptr_ + P * 4 * H, Shape2(P, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  // Workspace views: yx = input contribution for all steps, yh = recurrent
  // contribution for the current step, then scratch h and c states.
  Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
  Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
  // r aliases hy_ptr so the last projection lands directly in the output state.
  Tensor<cpu, 2, DType> r(hy_ptr, Shape2(1, 1));
  if (P > 0) r = Tensor<cpu, 2, DType>(hy_ptr, Shape2(N, P));
  const int offset = bid ? H : 0;        // reverse direction writes y's 2nd half
  const int proj_offset = bid ? P : 0;
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const index_t cell_size = N * H;
  // Input contribution for all timesteps at once: yx = x * wx^T.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (index_t i = 0; i < T; ++i) {
    index_t t = bid ? T - 1 - i : i;
    // Recurrent contribution: previous (projected) hidden state times wh^T.
    if (P > 0) {
      linalg_gemm(i ? r : hx, wh, yh_flat, alpha, beta, false, true);
    } else {
      linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;
      int k = jk % H;
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      if (P == 0) y[t][j][k + offset] = ht;
      if (i == T - 1 && state_outputs) {
        // Last step: emit final states instead of updating the scratch cell.
        if (P == 0) hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      } else {
        c[j][k] = ct;
      }
      h[j][k] = ht;
    }
    if (P > 0) {
      // Project h (N x H) down to r (N x P) and copy into this step's y.
      linalg_gemm(h, whr, r, alpha, beta, false, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int j = 0; j < N; ++j) {
        std::memcpy(y[t][j].dptr_ + proj_offset, r[j].dptr_, P * sizeof(DType));
      }
    }
  }
}
// Multi-layer (optionally bidirectional, optionally projected) LSTM inference.
// For D == 2 each layer's output ping-pongs between a scratch buffer
// (y_tmp_ptr) and y_ptr; `flag` is seeded from the parity of L so that the
// last layer always writes its output directly into y_ptr.
template <typename DType>
void LstmForwardInference(DType* ws,
                          bool state_outputs,
                          const int L,
                          const int D,
                          const index_t T,
                          const index_t N,
                          const index_t I,
                          const int H,
                          const int P,
                          DType* x_ptr,
                          DType* hx_ptr,
                          DType* cx_ptr,
                          DType* w_ptr,
                          DType* b_ptr,
                          DType* y_ptr,
                          DType* hy_ptr,
                          DType* cy_ptr) {
  const int total_layers = D * L;
  // With projection enabled the recurrent state is P-dimensional, not H.
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, P ? P : H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const index_t b_size = 2 * H * 4;
  const index_t cell_size = N * H;
  const index_t projection_size = (P ? P : H) * N;
  // Scratch y buffer placed past the single-layer workspace region.
  DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
  DType* y_cur_ptr = y_ptr;
  int idx = 0;  // state & cell state's idx;
  bool flag = L % 2 ? false : true;
  for (int i = 0; i < L; ++i) {
    const index_t input_size = i ? (P ? P : H) * D : I;
    index_t w_size = (input_size + (P ? P : H)) * H * 4;
    if (P > 0) {
      w_size += P * H;  // projection matrix follows the gate weights
    }
    // If bidirectional, need space to save current layer output y.
    if (D == 2) {
      y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
      flag = !flag;
    }
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, (P ? P : H) * D));
    LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H, P,
                                           x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    // If bidirectional, then calculate the reverse direction's forward result.
    if (D == 2) {
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += projection_size;
        cy_ptr += cell_size;
      }
      LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H, P,
                                             x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    // Don't need to move pointer in the last layer.
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y_cur_ptr;  // current output becomes the next layer's input
      ++idx;
      if (state_outputs) {
        hy_ptr += projection_size;
        cy_ptr += cell_size;
      }
    }
  }
}
// Backward pass (BPTT) for one layer, one direction of LSTM.
// Reads the forward pass's saved cell states `c` and gate activations `ifgo`
// from the reserved space `rs`, and accumulates gradients for the data (dx),
// weights (dwx/dwh), biases (dbx/dbh) and initial states (dhx/dcx),
// honoring the per-output write requests (kNullOp / kWriteTo / kAddTo).
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
                             DType* rs,
                             DType* tmp_buf,
                             bool bid,
                             const index_t T,
                             const index_t N,
                             const index_t I,
                             const int H,
                             const Tensor<cpu, 2, DType> &x,
                             const Tensor<cpu, 2, DType> &hx,
                             const Tensor<cpu, 2, DType> &cx,
                             const Tensor<cpu, 3, DType> &y,
                             const Tensor<cpu, 3, DType> &dy,
                             const Tensor<cpu, 2, DType> &dx,
                             const Tensor<cpu, 2, DType> &dhx,
                             const Tensor<cpu, 2, DType> &dcx,
                             DType* dhy_ptr,
                             DType* dcy_ptr,
                             DType* w_ptr,
                             DType* dw_ptr,
                             DType* db_ptr,
                             int req_data,
                             int req_params,
                             int req_state,
                             int req_statecell) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
  Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
  Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
  // The reverse direction's forward record sits 7*T*N*H into rs.
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  if (req_params != kNullOp && req_params != kAddTo) {
    // kWriteTo: zero the accumulators that are summed into below.
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < H * 4 * H; ++i) {
      dwh.dptr_[i] = 0;
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < 4 * H; ++i) {
      dbx.dptr_[i] = 0;
      dbh.dptr_[i] = 0;
    }
  }
  // Scratch: per-step gate gradients, then dh, dc and a staging buffer htmp
  // for the previous step's hidden state.
  Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
  Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
  Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;
  const DType alpha = 1.0;
  const DType beta0 = 0.0;
  const DType beta1 = 1.0;
  const DType beta2 = 2.0;  // used by the kAddTo gemm accumulation scheme below
  const index_t cell_size = N * H;
  // Seed dh/dc from the incoming final-state gradients (zero when absent).
  if (dhy_ptr != nullptr) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < cell_size; ++i) {
      dh.dptr_[i] = dhy_ptr[i];
    }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < cell_size; ++i) {
      dh.dptr_[i] = 0;
    }
  }
  if (dcy_ptr != nullptr) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < cell_size; ++i) {
      dc.dptr_[i] = dcy_ptr[i];
    }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < cell_size; ++i) {
      dc.dptr_[i] = 0;
    }
  }
  // Walk time in reverse.  NOTE(review): the `i >= 0` termination requires
  // index_t to be a signed type — confirm against the project's typedef.
  for (index_t i = T - 1; i >= 0; --i) {
    index_t t = bid ? T - 1 - i : i;
    index_t tnext = bid ? t + 1 : t - 1;
    // At i == 0 the "next" (earlier) state gradients flow into dhx/dcx.
    const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
    const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
    const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
    const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t jk = 0; jk < cell_size; ++jk) {
      index_t j = jk / H;
      index_t k = jk % H;
      DType tc = tanh(c[i][j][k]);
      DType it = ifgo[i][j][k][0];
      DType ft = ifgo[i][j][k][1];
      DType gt = ifgo[i][j][k][2];
      DType ot = ifgo[i][j][k][3];
      dh[j][k] += dy[t][j][k + offset];
      dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
      // Gate pre-activation gradients (sigmoid'/tanh' folded in).
      difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
      difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
      difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
      difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
      if (req_statecell != kNullOp || i > 0) {
        dcnext[j][k] = dc[j][k] * ft;
      }
      if (i) {
        // Stage the earlier step's hidden output for the dwh gemm below.
        htmp[j][k] = y[tnext][j][k + offset];
      }
    }
    Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
    if (req_state != kNullOp || i > 0) {
      linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
    }
    if (req_params != kNullOp) {
      if (req_params != kAddTo) {
        linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
      } else {
        // NOTE(review): beta2 == 2.0 doubles the existing accumulator each
        // step — verify this matches the intended kAddTo semantics.
        linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false);
        // generate dwx every time step for AddTo
        Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4));
        linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false);
      }
    }
  }
  // Whole-sequence gemms for dx and (kWriteTo) dwx.
  Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
  if (req_data != kNullOp) {
    // For the reverse direction accumulate on top of the forward one.
    linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
  }
  if (req_params != kNullOp && req_params != kAddTo) {
    linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
  }
  const index_t row = T * N;
  const index_t col = H * 4;
  if (req_params != kNullOp) {
    if (req_params != kAddTo) {
      // Bias gradients: sum gate gradients over all rows; dbh mirrors dbx.
      for (index_t i = 0; i < row; ++i) {
        #pragma omp parallel for num_threads(omp_threads)
        for (index_t j = 0; j < col; ++j) {
          dbx[j] += dyx[i][j];
          dbh[j] = dbx[j];
        }
      }
    } else {
      // kAddTo path: per-timestep partial sums staged in tmp_buf.
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T));
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < col * T; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (index_t t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (index_t j = 0; j < col; ++j) {
          for (index_t i = 0; i < N; ++i) {
            tmp_dbx[j][t] += dyx[t * N + i][j];
            tmp_dbh[j][t] = tmp_dbx[j][t];
          }
        }
        // NOTE(review): `dbx[j] += tmp + dbx[j]` doubles dbx each timestep;
        // paired with the beta2 scheme above — confirm this is intentional.
        #pragma omp parallel for num_threads(omp_threads)
        for (index_t j = 0; j < col; ++j) {
          dbx[j] += tmp_dbx[j][t] + dbx[j];
          dbh[j] += tmp_dbh[j][t] + dbh[j];
        }
      }
    }
  }
}
// Multi-layer (optionally bidirectional) LSTM backward pass.  Walks the
// layers from top to bottom, replaying each layer's reserved-space record
// (written by LstmForwardTraining) through LstmBackwardSingleLayer, and
// undoes the inter-layer dropout scaling on the propagated data gradient.
template <typename DType>
void LstmBackward(DType* ws,
                  DType* rs,
                  const int L,
                  const int D,
                  const index_t T,
                  const index_t N,
                  const index_t I,
                  const int H,
                  DType* x_ptr,
                  DType* hx_ptr,
                  DType* cx_ptr,
                  DType* w_ptr,
                  DType* y_ptr,
                  DType* dy_ptr,
                  DType* dhy_ptr,
                  DType* dcy_ptr,
                  DType* dx_ptr,
                  DType* dhx_ptr,
                  DType* dcx_ptr,
                  DType* dw_ptr,
                  DType* db_ptr,
                  int req_data,
                  int req_params,
                  int req_state,
                  int req_statecell,
                  const float dropout) {
  // dropout_random starts one-past-the-end of the mask area (it is moved
  // back by one layer's worth before each use below); rs2 is the start of
  // the per-layer records, directly after the masks.
  DType* dropout_random = rs + (L - 1) * D * T * N * H;
  DType* rs2 = rs + (L - 1) * D * T * N * H;
  DType* tmp_buf = ws;
  DType* ws2 = tmp_buf + 8 * T * H;
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
  const index_t b_size = 2 * H * 4;
  const index_t r_size = D * T * N * H * 6;
  const index_t y_offset = T * N * H * 5;
  const index_t w_size1 = (I + H) * H * 4;      // first layer
  const index_t w_size2 = (D * H + H) * H * 4;  // other layers
  const index_t cell_size = N * H;
  const index_t y_size = T * N * H * D;
  DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3;
  // Top layer down to layer 0.
  for (int i = L - 1; i >= 0; --i) {
    const index_t input_size = i ? H * D : I;
    const index_t w_size = i ? w_size2 : w_size1;
    int idx = i * D;
    DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
    DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
    DType* db_cur_ptr = db_ptr + i * b_size * D;
    DType* rs_cur_ptr = rs2 + i * r_size;
    DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : nullptr;
    DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : nullptr;
    Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
    Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
    // Layer input: the previous layer's saved y (i > 0) or the op input.
    Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
    // Layer data gradient: scratch for inner layers, dx output for layer 0.
    Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
    LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H,
                                   x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                   dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                   req_data, req_params, req_state, req_statecell);
    if (D == 2) {
      w_cur_ptr += w_size;
      dw_cur_ptr += w_size;
      db_cur_ptr += b_size;
      ++idx;
      dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : nullptr;
      dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : nullptr;
      LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H,
                                     x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                     dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                     req_data, req_params, req_state, req_statecell);
      // Prevent overwritting dy while calculating dx in left2right layer
      const int loop_iteration = (L - 1) - i;
      dy_tmp_ptr = loop_iteration % 2 ? dy_tmp_ptr - y_size : dy_tmp_ptr + y_size;
    }
    if (dropout > 0.0f && i > 0 && req_data != kNullOp) {
      // Undo the forward pass's inverted dropout: masked units get zero
      // gradient, kept units are rescaled by 1/(1 - dropout).
      dropout_random = dropout_random - T * N * D * H;
      const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t j = 0; j < T * N * D * H; j++) {
        if (dropout_random[j] == 0) {
          dx.dptr_[j] = 0;
        } else {
          dx.dptr_[j] = dx.dptr_[j] / (1.0f - dropout);
        }
      }
    }
    dy_ptr = dx.dptr_;  // this layer's dx is the next (lower) layer's dy
  }
}
// One GRU layer (both directions when D == 2), inference mode.
// Gate order on the 3H axis is r (reset), z (update), n (new); the update is
//   ht = (1 - zt) * nt + zt * ht_1.
// The forward direction fills y[., ., 0:H] left to right; the reverse
// direction fills y[., ., H:2H] right to left.
template<typename DType>
void GruForwardInferenceSingleLayer(DType* ws,
                                    DType* tmp_buf,
                                    bool state_outputs,
                                    const int D,
                                    const index_t T,
                                    const index_t N,
                                    const index_t I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    DType* wx_ptr,
                                    DType* wh_ptr,
                                    DType* bx_ptr,
                                    DType* bh_ptr,
                                    DType* y_ptr,
                                    DType* hy_ptr) {
  // ht/ht_1 walk forward through y; back_ht/back_ht_1 walk backward from the
  // last timestep's reverse-direction slot.
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;              // [D, T, N, 3 * H]
  DType* gemmC2 = gemmC1 + D * T * N * 3 * H;  // N * 3 * H
  DType* rt = gemmC2 + N * 3 * H;
  DType* zt = rt + N * H;
  DType* nt = zt + N * H;
  // Reverse-direction weights/biases follow the forward ones.
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + 3 * H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + 3 * H * 2: nullptr;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed step 0 (and step T-1 for the reverse direction) with hx.
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
  // x * wx.T : [T * N, I] * [I, 3 * H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (index_t t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Strip the interleaved reverse-direction columns by transposing the
      // [N, D*H] view into [D, H, N] and taking the forward slice.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * 3 * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        index_t rtb = i * 3 * H;
        index_t ztb = i * 3 * H + H;
        index_t ntb = i * 3 * H + 2 * H;
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
            + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
            + bx[1][j] + bh[1][j]);
        // Reset gate multiplies only the hidden-side contribution of n.
        nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
            rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
            zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t rtb = i * 3 * H;
          index_t ztb = i * 3 * H + H;
          index_t ntb = i * 3 * H + 2 * H;
          rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
              gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
              gemmC2[ztb + j] + back_bx[1][j]+ back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
              + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
              + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      // Forward direction ends at t = T-1; reverse direction ends at t = 0.
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
// Multi-layer GRU inference.  Layer outputs ping-pong between y_ptr and the
// scratch buffer y_tmp; the (L + l) % 2 parity test routes the final layer's
// output into y_ptr.  Note I is deliberately non-const: after layer 0 the
// input width becomes D * H.
template <typename DType>
void GruForwardInference(DType* ws,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const index_t T,
                         const index_t N,
                         index_t I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* w_ptr,
                         DType* y_ptr,
                         DType* hy_ptr) {
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  // Biases sit after all layers' weight matrices in the packed parameter blob.
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;
  DType* y_tmp = ws;
  DType* y_l = x_ptr;
  DType* tmp_buf = y_tmp + D * T * N * H;
  DType* ws2 = y_tmp + D * T * N * H + D * H * N;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  for (int l = 0; l < L; l++) {
    Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
    // Alternate output buffers so the last layer lands in y_ptr.
    if ((L + l) % 2) {
      y_l = y_ptr;
    } else {
      y_l = y_tmp;
    }
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    GruForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                          x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l);
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + 3 * H * D * 2;
    bh_l = bh_l + 3 * H * D * 2;
    wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
    if (l == 0) {
      I = D * H;  // upper layers consume the concatenated directions
    }
    wh_l = wx_l + I * 3 * H;
  }
}
// One GRU layer (both directions when D == 2), training mode.  Identical math
// to GruForwardInferenceSingleLayer, but additionally records per-step gate
// activations (gateR/gateZ/gateN) and the hidden-side n-gate pre-activation
// (Mnh = gemmC2_n + bh_n) so the backward pass can replay them.
template<typename DType>
void GruForwardTrainingSingleLayer(DType* ws,
                                   DType* tmp_buf,
                                   bool state_outputs,
                                   const int D,
                                   const index_t T,
                                   const index_t N,
                                   const index_t I,
                                   const int H,
                                   const Tensor<cpu, 2, DType> &x,
                                   const Tensor<cpu, 2, DType> &hx,
                                   DType* wx_ptr,
                                   DType* wh_ptr,
                                   DType* bx_ptr,
                                   DType* bh_ptr,
                                   DType* gateR,
                                   DType* gateZ,
                                   DType* gateN,
                                   DType* Mnh,
                                   DType* y_ptr,
                                   DType* hy_ptr) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;              // [D, T, N, 3 * H]
  DType* gemmC2 = gemmC1 + D * T * N * 3 * H;  // N * 3 * H
  // Gate cursors point into the reserved-space arrays, advanced per step.
  DType* rt = gateR;
  DType* zt = gateZ;
  DType* nt = gateN;
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + 3 * H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + 3 * H * 2 : nullptr;
  DType* back_gateR = gateR + T * N * H;
  DType* back_gateZ = gateZ + T * N * H;
  DType* back_gateN = gateN + T * N * H;
  DType* back_Mnh = Mnh + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed step 0 (and step T-1 for the reverse direction) with hx.
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
  // x * wx.T : [T * N, I] * [I, 3 * H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (index_t t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Transpose the interleaved [N, D*H] view to [D, H, N] and take the
      // forward-direction slice for the recurrent gemm.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * 3 * H;
    DType* Mnht = Mnh + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        index_t rtb = i * 3 * H;
        index_t ztb = i * 3 * H + H;
        index_t ntb = i * 3 * H + 2 * H;
        // Save the hidden-side n pre-activation for the backward pass.
        Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j];
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
            + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
            + bx[1][j] + bh[1][j]);
        nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
            rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
            zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      rt = back_gateR + (T - 1 - t) * N * H;
      zt = back_gateZ + (T - 1 - t) * N * H;
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t rtb = i * 3 * H;
          index_t ztb = i * 3 * H + H;
          index_t ntb = i * 3 * H + 2 * H;
          back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j];
          rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
              gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
              gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
              + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
              + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      // Forward direction ends at t = T-1; reverse direction ends at t = 0.
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
// Multi-layer GRU forward pass, training mode.  Reserved space `rs` stores,
// per layer, the gate activations (gateR/gateZ/gateN), layer outputs y, the
// n-gate hidden pre-activations Mnh, and inter-layer dropout masks — all
// consumed by the GRU backward pass.  Note I is non-const: after layer 0 the
// input width becomes D * H.
template <typename DType>
void GruForwardTraining(DType* ws,
                        DType* rs,
                        bool state_outputs,
                        const int L,
                        const int D,
                        const index_t T,
                        const index_t N,
                        index_t I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* y_ptr,
                        DType* hy_ptr,
                        const float dropout,
                        std::mt19937 &rnd_engine) {  // NOLINT(runtime/references)
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  // Biases sit after all layers' weight matrices in the packed parameter blob.
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Carve the reserved space into per-quantity arrays spanning all layers.
  DType* gateR_l = rs;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      // Previous layer's saved y becomes this layer's input.
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    if (dropout > 0.0f && l > 0) {
      // Inverted dropout applied in place to the previous layer's output;
      // mask stores 0 (dropped) or 1 - dropout (kept) for the backward pass.
      std::uniform_real_distribution<float> distribution(0, 1);
      for (index_t i = 0; i < T * N * I; i++) {
        if (distribution(rnd_engine) < dropout) {
          dropout_random[(l - 1) * T * N * I + i] = 0;
          y_tmp[i] = 0;
        } else {
          dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
          y_tmp[i] = y_tmp[i] / (1.0f - dropout);
        }
      }
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    GruForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                         x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                         gateR_l, gateZ_l, gateN_l, Mnh_l, y_l, hy_l);
    gateR_l = gateR_l + T * D * N * H;
    gateZ_l = gateZ_l + T * D * N * H;
    gateN_l = gateN_l + T * D * N * H;
    Mnh_l = Mnh_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + 3 * H * D * 2;
    bh_l = bh_l + 3 * H * D * 2;
    wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
    if (l == 0) {
      I = D * H;  // upper layers consume the concatenated directions
    }
    wh_l = wx_l + I * 3 * H;
  }
  // y_l still points at the last layer's saved output: publish it.
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  #pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}
template <typename DType>
void GruBackwardSingleLayer(DType* ws,
DType* tmp_buf,
const int D,
const index_t T,
const index_t N,
const index_t I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* gateR,
DType* gateZ,
DType* gateN,
DType* Mnh,
DType* dx,
DType* dhx,
DType* dwx,
DType* dwh,
DType* dbx,
DType* dbh,
int req_data,
int req_params,
int req_state) {
DType* dyt;
DType* ht1; // [N, D, H]
DType* rt;
DType* zt;
DType* nt;
DType* dat;
DType* dart;
DType* dar = ws; // [T, N, 3 * H]
DType* da = dar + T * N * 3 * H; // [T, N, 3 * H]
DType* dht1 = da + T * N * 3 * H; // [D, N, H]
DType* hx_ = dht1 + D * N * H; // [N, D, H]
DType* Mnht = Mnh;
DType* back_ht1;
DType* back_dht1 = dht1 + N * H; // [N, H]
DType* back_Mnht = Mnh + T * N * H;
DType* back_gateR = gateR + T * N * H;
DType* back_gateZ = gateZ + T * N * H;
DType* back_gateN = gateN + T * N * H;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_dwx = dwx + I * 3 * H + H * 3 * H;
DType* back_dwh = dwh + I * 3 * H + H * 3 * H;
DType* back_dbx = dbx + 3 * H * 2;
DType* back_dbh = dbh + 3 * H * 2;
DType alpha = 1.0;
DType beta = 0.0;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H * 3 * H; ++i) {
dwh[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * 3 * H; ++i) {
dbx[i] = 0;
dbh[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N * H; ++i) {
if (dhy_ptr) {
dht1[i] = dhy_ptr[i];
} else {
dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + j] = hx[i][j];
}
}
if (D == 2) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N * H; ++i) {
if (dhy_ptr) {
back_dht1[i] = dhy_ptr[N * H + i];
} else {
back_dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + H + j] = hx[N + i][j];
}
}
}
for (index_t t = T - 1; t >= 0; --t) {
if (t) {
ht1 = y_ptr + (t - 1) * N * D * H;
} else {
ht1 = hx_;
}
// add dy[T, N, D, H] to dhy[D, N, H]
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
dht1[i * H + j] += dyt[i * D * H + j];
}
}
rt = gateR + t * N * H;
zt = gateZ + t * N * H;
nt = gateN + t * N * H;
Mnht = Mnh + t * N * H;
dat = da + t * N * 3 * H;
dart = dar + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int nid = i * 3 * H + 2 * H + j;
int zid = i * 3 * H + H + j;
int rid = i * 3 * H + j;
int id = i * H + j;
dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id]) *
zt[id] * (1 - zt[id]);
dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] *
(1 - rt[id]);
dart[nid] = dat[nid] * rt[id];
dht1[id] = dht1[id] * zt[id];
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = dart * wh [N, H] = [N, 3 * H] * [3 * H, H]
Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false);
}
// dwh = dart.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H));
Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (index_t j = 0; j < N * T; ++j) {
dbx[i] += da[j * 3 * H + i];
dbh[i] += dar[j * 3 * H + i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < H * T * 3; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (index_t j = 0; j < N; ++j) {
tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
dbx[i] += tmp_dbx[i][t] + dbx[i];
dbh[i] += tmp_dbh[i][t] + dbh[i];
}
}
}
}
alpha = 1.0;
beta = 0.0;
// dx = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
}
// dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
}
if (D == 2) {
for (index_t t = 0; t < T; ++t) {
if (t == T-1) {
back_ht1 = hx_;
} else {
back_ht1 = y_ptr + (t + 1) * N * D * H;
}
// add dy[T, N, D, H] to dhy[D, N, H]
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
back_dht1[i * H + j] += dyt[i * D * H + H + j];
}
}
rt = back_gateR + t * N * H;
zt = back_gateZ + t * N * H;
nt = back_gateN + t * N * H;
back_Mnht = Mnh + (T + t) * N * H;
dat = da + t * N * 3 * H;
dart = dar + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t nid = i * 3 * H + 2 * H + j;
index_t zid = i * 3 * H + H + j;
index_t rid = i * 3 * H + j;
index_t id = i * H + j;
dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] -
nt[id]) * zt[id] * (1 - zt[id]);
dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] *
(1 - rt[id]);
dart[nid] = dat[nid] * rt[id];
back_dht1[id] = back_dht1[id] * zt[id];
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = da * wh [N, H] = [N, 3 * H] * [3 * H, H]
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
// dwh = da.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false);
}
linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (index_t j = 0; j < N * T; ++j) {
back_dbx[i] += da[j * 3 * H + i];
back_dbh[i] += dar[j * 3 * H + i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < H * T * 3; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (index_t j = 0; j < N; ++j) {
tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
back_dbh[i] += tmp_dbh[i][t] + back_dbh[i];
}
}
}
}
alpha = 1.0;
beta = 1.0;
// dxt = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false);
}
alpha = 1.0;
beta = 0.0;
// dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false);
}
}
if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N * H * D; ++i) {
dhx[i] = dht1[i];
}
}
}
// GruBackward: backward pass through all L layers of a (bi)directional GRU.
//
// Walks the layers from the top (l = L - 1) down to the input layer, calling
// GruBackwardSingleLayer for each one and rewinding the per-layer pointers
// (saved gates, saved outputs, weight/bias gradient slices) between layers.
//
//   ws              scratch workspace (partitioned below into tmp_buf/dx_l/ws2)
//   rs              reserved space written by the forward pass: per-layer
//                   gateR/gateZ/gateN, layer outputs y, Mnh, dropout masks
//   L, D            number of layers; number of directions (1 or 2)
//   T, N, I, H      sequence length, batch size, input size, hidden size
//   req_data/req_params/req_state   kNullOp/kWriteTo/kAddTo request flags for
//                   dx, (dw, db), dhx respectively
//   dropout         forward-pass dropout probability; used here to rescale dx
//                   through the saved dropout_random masks
template <typename DType>
void GruBackward(DType* ws,
                 DType* rs,
                 const int L,
                 const int D,
                 const index_t T,
                 const index_t N,
                 index_t I,
                 const int H,
                 DType* x_ptr,
                 DType* hx_ptr,
                 DType* w_ptr,
                 DType* dy_ptr,
                 DType* dhy_ptr,
                 DType* dx_ptr,
                 DType* dhx_ptr,
                 DType* dw_ptr,
                 int req_data,
                 int req_params,
                 int req_state,
                 const float dropout) {
  // Gradient buffers mirror the packed weight layout: all wx/wh blocks first,
  // then all biases (hence the (L - 1)/(D - 1) offset terms for dbx).
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H * 3;
  DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  // Pointers into the reserved space start at the TOP layer's slice; they are
  // decremented as the loop descends.
  DType* gateR_l = rs + (L - 1) * T * D * N * H;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H * 3;
  } else {
    wh_l = wh_l + (D * H) * H * 3;
  }
  DType* dhy_l = nullptr;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* dwh_l = nullptr;
  if (L == 1) {
    dwh_l = dwx_l + I * H * 3;
  } else {
    dwh_l = dwx_l + (D * H) * H * 3;
  }
  DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
  DType* dbh_l = dbx_l + 3 * H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  index_t inputsize = I;
  // For layer l > 0 the layer "input" is the saved output of layer l - 1;
  // for l == 0 it is the original x (reassigned inside the loop).
  DType* y_tmp = y_l - T * N * H * D;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l,
                                  dhy_l, gateR_l, gateZ_l, gateN_l, Mnh_l, dx_l, dhx_l,
                                  dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state);
    // Undo forward dropout: zeroed positions get zero gradient, kept positions
    // are rescaled by 1 / (1 - p) (inverted-dropout convention).
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      dropout_random = dropout_random - T * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      // This layer's input gradient becomes the next (lower) layer's dy.
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      gateR_l = gateR_l - T * D * N * H;
      gateZ_l = gateZ_l - T * D * N * H;
      gateN_l = gateN_l - T * D * N * H;
      Mnh_l = Mnh_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_tmp - T * N * H * D;
      // Layer 0 has input size `inputsize`; all others have input size I = D * H,
      // so the stride back to layer 0's weights differs.
      if (l == 1) {
        wx_l = wx_l - (inputsize + H) * H * 3 * D;
        wh_l = wx_l + inputsize * 3 * H;
        dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
        dwh_l = dwx_l + inputsize * 3 * H;
      } else {
        wx_l = wx_l - (I + H) * H * 3 * D;
        wh_l = wx_l + I * 3 * H;
        dwx_l = dwx_l - (I + H) * H * 3 * D;
        dwh_l = dwx_l + I * 3 * H;
      }
      dbx_l = dbx_l - D * 3 * H * 2;
      dbh_l = dbx_l + 3 * H;
    }
  }
}
// Forward inference of ONE vanilla-RNN layer (tanh or ReLU cell), optionally
// bidirectional.  Writes the per-step hidden states into y_ptr interleaved as
// [T, N, D, H] (forward direction at offset 0, backward at offset H) and,
// when state_outputs is set, the final states into hy_ptr as [D, N, H].
//
//   ws        workspace for the two GEMM results (gemmC1 [D, T, N, H] and
//             gemmC2 [N, H])
//   tmp_buf   scratch used to transpose ht-1 when D == 2
//   x, hx     input sequence [T * N, I] and initial state [D * N, H]
//   wx/wh/bx/bh   packed weights/biases; the backward-direction blocks sit
//             immediately after the forward ones
//   mode      1 => tanh activation, otherwise ReLU
template<typename DType>
void VanillaRNNForwardInferenceSingleLayer(DType* ws,
                                           DType* tmp_buf,
                                           bool state_outputs,
                                           const int D,
                                           const index_t T,
                                           const index_t N,
                                           const index_t I,
                                           const int H,
                                           const Tensor<cpu, 2, DType> &x,
                                           const Tensor<cpu, 2, DType> &hx,
                                           DType* wx_ptr,
                                           DType* wh_ptr,
                                           DType* bx_ptr,
                                           DType* bh_ptr,
                                           DType* y_ptr,
                                           DType* hy_ptr,
                                           int mode) {
  // ht walks forward through y; back_ht walks backward from the last step.
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;  // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + H * 2: nullptr;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the recurrences: copy the initial states into the y buffer so the
  // first step can read "ht-1" from y directly.
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
  // x * wx.T : [T * N, I] * [I, H] — input projection for all steps at once.
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (index_t t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Bidirectional: y interleaves directions, so transpose to [D, H, N]
      // first and take slice 0 (the forward direction).
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        index_t tb = i * H;
        if (mode == 1) {
          ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
                                   gemmC2[tb + j] + bh[0][j]);
        } else {
          ht[i * D * H + j] = relu(gemmC1_t[tb + j] + bx[0][j] +
                                   gemmC2[tb + j] + bh[0][j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction (processes step T - 1 - t)
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t tb = i * H;
          if (mode == 1) {
            back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
                                          + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            back_ht[i * D * H + j] = relu(gemmC1_t[tb + j] + back_bx[0][j]
                                          + gemmC2[tb + j] + back_bh[0][j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
template <typename DType>
void VanillaRNNForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const index_t T,
const index_t N,
index_t I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr,
int mode) {
DType* wx = w_ptr;
DType* wh = wx + I * H;
DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
+ (L - 1) * ((D + 1) * H) * H * D;
DType* bh = bx + H;
DType* y_tmp = ws;
DType* y_l = x_ptr;
DType* tmp_buf = y_tmp + D * T * N * H;
DType* ws2 = y_tmp + D * T * N * H + D * H * N;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
for (int l = 0; l < L; l++) {
Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
if ((L + l) % 2) {
y_l = y_ptr;
} else {
y_l = y_tmp;
}
Tensor<cpu, 2, DType> hx_l = hx[D * l];
VanillaRNNForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l,
hy_l, mode);
hy_l = hy_l + D * N * H;
bx_l = bx_l + H * D * 2;
bh_l = bh_l + H * D * 2;
wx_l = wx_l + I * H * D + H * H * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * H;
}
}
// Forward pass (training) of ONE vanilla-RNN layer, optionally bidirectional.
// Same math as the inference path, but additionally records per-step values
// into gateN for the backward pass:
//   mode == 1 (tanh): gateN stores the post-activation hidden state;
//   otherwise (ReLU): gateN stores the PRE-activation value (needed for the
//   ReLU gradient mask in VanillaRNNBackwardSingleLayer).
// Layout of y_ptr / hy_ptr / weights is identical to the inference kernel.
template<typename DType>
void VanillaRNNForwardTrainingSingleLayer(DType* ws,
                                          DType* tmp_buf,
                                          bool state_outputs,
                                          const int D,
                                          const index_t T,
                                          const index_t N,
                                          const index_t I,
                                          const int H,
                                          const Tensor<cpu, 2, DType> &x,
                                          const Tensor<cpu, 2, DType> &hx,
                                          DType* wx_ptr,
                                          DType* wh_ptr,
                                          DType* bx_ptr,
                                          DType* bh_ptr,
                                          DType* gateN,
                                          DType* y_ptr,
                                          DType* hy_ptr,
                                          int mode) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;  // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* nt = gateN;
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + H * 2 : nullptr;
  DType* back_gateN = gateN + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 1, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the recurrences with the initial states (copied into the y buffer).
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
  // x * wx.T : [T * N, I] * [I, H] — input projection for all steps at once.
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (index_t t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Transpose interleaved [N, D * H] to [D, H, N] and use slice 0
      // (forward direction) as the recurrent input.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        index_t tb = i * H;
        if (mode == 1) {
          // tanh cell: save the activated value (tanh' = 1 - h^2 uses it).
          nt[tb + j] = ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
                                                gemmC2[tb + j] + bh[0][j]);
        } else {
          // ReLU cell: save the PRE-activation value for the gradient mask.
          nt[tb + j] = gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j];
          ht[i * D * H + j] = relu(nt[tb + j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction (processes step T - 1 - t)
    if (D == 2) {
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t tb = i * H;
          if (mode == 1) {
            nt[tb + j] = back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
                                                       + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            nt[tb + j] = gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j];
            back_ht[i * D * H + j] = relu(nt[tb + j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
// Forward pass (training) of a stacked vanilla RNN.  Unlike the inference
// driver, every layer's output is kept in the reserved space rs (needed by
// the backward pass), and the final layer's output is copied into y_ptr at
// the end.  Dropout (when enabled) is applied between layers, recording the
// mask in dropout_random so the backward pass can invert it.
//
//   rs layout: [gateN for all layers][y for all layers][dropout masks][...]
//   rnd_engine: shared RNG for the dropout mask (hence the NOLINT reference)
template <typename DType>
void VanillaRNNForwardTraining(DType* ws,
                               DType* rs,
                               bool state_outputs,
                               const int L,
                               const int D,
                               const index_t T,
                               const index_t N,
                               index_t I,
                               const int H,
                               DType* x_ptr,
                               DType* hx_ptr,
                               DType* w_ptr,
                               DType* y_ptr,
                               DType* hy_ptr,
                               const float dropout,
                               int mode,
                               std::mt19937 &rnd_engine) {  // NOLINT(runtime/references)
  DType* wx = w_ptr;
  DType* wh = wx + I * H;
  DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
      + (L - 1) * ((D + 1) * H) * H * D;
  DType* bh = bx + H;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  DType* gateN_l = rs;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* dropout_random = y_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      // Input of this layer is the previous layer's saved output slice.
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    if (dropout > 0.0f && l > 0) {
      // Inverted dropout applied IN PLACE to the previous layer's saved
      // output; the backward pass re-reads the mask from dropout_random.
      // NOTE(review): this mutates the activations stored in rs — the
      // backward pass appears to rely on exactly this scaled version.
      std::uniform_real_distribution<float> distribution(0, 1);
      for (index_t i = 0; i < T * N * I; i++) {
        if (distribution(rnd_engine) < dropout) {
          dropout_random[(l - 1) * T * N * I + i] = 0;
          y_tmp[i] = 0;
        } else {
          dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
          y_tmp[i] = y_tmp[i] / (1.0f - dropout);
        }
      }
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    VanillaRNNForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                                x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                                gateN_l, y_l, hy_l, mode);
    // Advance per-layer slices of the reserved space, biases and weights.
    gateN_l = gateN_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + H * D * 2;
    bh_l = bh_l + H * D * 2;
    wx_l = wx_l + I * H * D + H * H * D;
    if (l == 0) {
      I = D * H;
    }
    wh_l = wx_l + I * H;
  }
  // Publish the top layer's output (kept in rs during the loop) to y_ptr.
  #pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}
// Backward pass of ONE vanilla-RNN layer (tanh or ReLU cell), optionally
// bidirectional.  Consumes the per-step values saved by the training forward
// pass (gateN: post-activation for tanh, PRE-activation for ReLU) and
// produces gradients w.r.t. input (dx), initial state (dhx), weights
// (dwx/dwh) and biases (dbx/dbh), honouring the kNullOp/kWriteTo/kAddTo
// request flags.  The forward direction walks t = T-1..0; for D == 2 the
// backward direction is then processed walking t = 0..T-1.
// NOTE(review): the time loops assume index_t is a signed type (t >= 0 after
// --t); confirmed only if index_t is int64_t as in MXNet builds.
template <typename DType>
void VanillaRNNBackwardSingleLayer(DType* ws,
                                   DType* tmp_buf,
                                   const int D,
                                   const index_t T,
                                   const index_t N,
                                   const index_t I,
                                   const int H,
                                   const Tensor<cpu, 2, DType> &x,
                                   const Tensor<cpu, 2, DType> &hx,
                                   DType* wx_ptr,
                                   DType* wh_ptr,
                                   DType* y_ptr,
                                   DType* dy_ptr,
                                   DType* dhy_ptr,
                                   DType* gateN,
                                   DType* dx,
                                   DType* dhx,
                                   DType* dwx,
                                   DType* dwh,
                                   DType* dbx,
                                   DType* dbh,
                                   int req_data,
                                   int req_params,
                                   int req_state,
                                   int mode) {
  DType* dyt;
  DType* ht1;  // [N, D, H]
  DType* dart;
  DType* nt;
  // Workspace carve-up: dar holds d(pre-activation) for all steps, dht1 the
  // running state gradient (both directions), hx_ a [N, D, H] copy of hx.
  DType* dar = ws;  // [T, N, H]
  DType* dht1 = dar + T * N * H;  // [D, N, H]
  DType* hx_ = dht1 + D * N * H;  // [N, D, H]
  DType* back_ht1;
  DType* back_dht1 = dht1 + N * H;  // [N, H]
  DType* back_gateN = gateN + T * N * H;
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_dwx = dwx + I * H + H * H;
  DType* back_dwh = dwh + I * H + H * H;
  DType* back_dbx = dbx + H * 2;
  DType* back_dbh = dbh + H * 2;
  DType alpha = 1.0;
  DType beta = 0.0;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // For kWriteTo, clear the accumulated gradients up front; for kAddTo the
  // existing contents must be preserved.
  if (req_params != kNullOp && req_params != kAddTo) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H * H; ++i) {
      dwh[i] = 0;
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H; ++i) {
      dbx[i] = 0;
      dbh[i] = 0;
    }
  }
  // Seed the state gradient with dhy (or zero when no head gradient given).
  #pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < N * H; ++i) {
    if (dhy_ptr) {
      dht1[i] = dhy_ptr[i];
    } else {
      dht1[i] = 0;
    }
  }
  // Re-pack hx [D * N, H] into interleaved [N, D, H] so it can stand in for
  // y at the sequence boundary.
  #pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < N; ++i) {
    for (int j = 0; j < H; ++j) {
      hx_[i * D * H + j] = hx[i][j];
    }
  }
  if (D == 2) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N * H; ++i) {
      if (dhy_ptr) {
        back_dht1[i] = dhy_ptr[N * H + i];
      } else {
        back_dht1[i] = 0;
      }
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        hx_[i * D * H + H + j] = hx[N + i][j];
      }
    }
  }
  // ---- forward direction, stepping backwards in time ----
  for (index_t t = T - 1; t >= 0; --t) {
    if (t) {
      ht1 = y_ptr + (t - 1) * N * D * H;
    } else {
      ht1 = hx_;  // step 0 reads the initial state
    }
    // add dy[T, N, D, H] to dhy[D, N, H]
    dyt = dy_ptr + t * N * D * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        dht1[i * H + j] += dyt[i * D * H + j];
      }
    }
    nt = gateN + t * N * H;
    dart = dar + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        index_t id = i * H + j;
        if (mode == 1) {
          // tanh': 1 - h^2 (nt holds the activated value)
          dart[id] = dht1[id] * (1 - nt[id] * nt[id]);
        } else {
          // ReLU': pass-through where pre-activation (nt) was positive.
          // NOTE(review): the static_cast<float> narrows DType=double
          // gradients to float here — confirm this is intended.
          dart[id] = nt[id] > 0.0f ? static_cast<float>(dht1[id]) : 0.0f;
        }
        dht1[id] = 0;
      }
    }
    if (req_params != kNullOp) {
      alpha = 1.0;
      beta = 1.0;
      // dht1 = dart * wh [N, H] = [N, H] * [H, H]
      Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
      Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
      linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
      if (req_params == kAddTo) {
        beta = 2.0;
        // dwx = da.T * x [H, I] = [H, N] * [N, I] for AddTo
        Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
        linalg_gemm(d_dart, d_xt, d_dwx, alpha, beta, true, false);
      }
      // dwh = dart.T * ht1 [H, H] = [H, N] * [N, H]
      Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
      Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(H, H));
      Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
      linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
    }
  }
  if (req_params != kNullOp) {
    // dbx = e * da [1, H] = [1, N] * [N, H]
    if (req_params != kAddTo) {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < H; ++i) {
        for (index_t j = 0; j < N * T; ++j) {
          dbx[i] += dar[j * H + i];
          dbh[i] = dbx[i];
        }
      }
    } else {
      // kAddTo path: accumulate per-step bias gradients in tmp_buf first.
      // NOTE(review): "dbx[i] += tmp + dbx[i]" doubles the previous value;
      // this mirrors the beta = 2.0 convention used for dwx above — verify
      // against the GRU kernels before changing.
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < H * T; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (index_t t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          for (index_t j = 0; j < N; ++j) {
            tmp_dbx[i][t] += dar[t * N * H + j * H + i];
            tmp_dbh[i][t] = tmp_dbx[i][t];
          }
        }
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          dbx[i] += tmp_dbx[i][t] + dbx[i];
          dbh[i] = dbx[i];
        }
      }
    }
  }
  alpha = 1.0;
  beta = 0.0;
  // dx = da * wx [T * N, I] = [T * N, H] * [H, I]
  Tensor<cpu, 2, DType> d_dar(dar, Shape2(T * N, H));
  if (req_data != kNullOp) {
    Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
    linalg_gemm(d_dar, wx, d_dx, alpha, beta, false, false);
  }
  // dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
  if (req_params != kNullOp && req_params != kAddTo) {
    Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
    linalg_gemm(d_dar, x, d_dwx, alpha, beta, true, false);
  }
  // ---- backward direction (D == 2), stepping forwards in time ----
  if (D == 2) {
    for (index_t t = 0; t < T; ++t) {
      if (t == T-1) {
        back_ht1 = hx_;
      } else {
        back_ht1 = y_ptr + (t + 1) * N * D * H;
      }
      // add dy[T, N, D, H] to dhy[D, N, H]
      dyt = dy_ptr + t * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          back_dht1[i * H + j] += dyt[i * D * H + H + j];
        }
      }
      nt = back_gateN + t * N * H;
      dart = dar + t * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t id = i * H + j;
          if (mode == 1) {
            dart[id] = back_dht1[id] * (1 - nt[id] * nt[id]);
          } else {
            dart[id] = nt[id] > 0.0f ? static_cast<float>(back_dht1[id]) : 0.0f;
          }
          back_dht1[id] = 0;
        }
      }
      if (req_params != kNullOp) {
        alpha = 1.0;
        beta = 1.0;
        // dht1 = da * wh [N, H] = [N, H] * [H, H]
        Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
        Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
        linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // dwh = da.T * ht1 [H, H] = [H, N] * [N, H]
        Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(H, H));
        Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
        Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
            (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
        d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
        if (req_params == kAddTo) {
          beta = 2.0;
          // dwx = da.T * x [ H, I] = [H, N] * [N, I] for AddTo
          Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
          Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
          linalg_gemm(d_dart, d_xt, d_back_dwx, alpha, beta, true, false);
        }
        linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
      }
    }
    if (req_params != kNullOp) {
      // dbx = e * da [1, H] = [1, N] * [N, H]
      if (req_params != kAddTo) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H; ++i) {
          for (index_t j = 0; j < N * T; ++j) {
            back_dbx[i] += dar[j * H + i];
            back_dbh[i] = back_dbx[i];
          }
        }
      } else {
        const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
        const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
        #pragma omp parallel for num_threads(omp_threads)
        for (index_t i = 0; i < H * T; ++i) {
          tmp_dbx.dptr_[i] = 0;
          tmp_dbh.dptr_[i] = 0;
        }
        for (index_t t = T - 1; t >= 0; --t) {
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < H; ++i) {
            for (index_t j = 0; j < N; ++j) {
              tmp_dbx[i][t] += dar[t * N * H + j * H + i];
              tmp_dbh[i][t] = tmp_dbx[i][t];
            }
          }
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < H; ++i) {
            back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
            back_dbh[i] = back_dbx[i];
          }
        }
      }
    }
    alpha = 1.0;
    beta = 1.0;  // accumulate on top of the forward direction's dx
    // dxt = da * wx [T * N, I] = [T * N, H] * [H, I]
    Tensor<cpu, 2, DType> d_dar2(dar, Shape2(T * N, H));
    if (req_data != kNullOp) {
      Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
      linalg_gemm(d_dar2, back_wx, d_dx, alpha, beta, false, false);
    }
    alpha = 1.0;
    beta = 0.0;
    // dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
    if (req_params != kNullOp && req_params != kAddTo) {
      Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
      linalg_gemm(d_dar2, x, d_back_dwx, alpha, beta, true, false);
    }
  }
  // Final running state gradients become dhx [D, N, H].
  if (req_state != kNullOp) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N * H * D; ++i) {
      dhx[i] = dht1[i];
    }
  }
}
// VanillaRNNBackward: backward pass through all L layers of a stacked
// (bi)directional vanilla RNN.  Mirrors GruBackward: walks layers from the
// top down, calling VanillaRNNBackwardSingleLayer per layer and rewinding
// the per-layer pointers (saved gateN, saved outputs, weight/bias gradient
// slices) between iterations.  mode == 1 selects tanh, otherwise ReLU.
template <typename DType>
void VanillaRNNBackward(DType* ws,
                        DType* rs,
                        const int L,
                        const int D,
                        const index_t T,
                        const index_t N,
                        index_t I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* dy_ptr,
                        DType* dhy_ptr,
                        DType* dx_ptr,
                        DType* dhx_ptr,
                        DType* dw_ptr,
                        int req_data,
                        int req_params,
                        int req_state,
                        const float dropout,
                        int mode) {
  // Gradient buffers mirror the packed weight layout (wx/wh blocks first,
  // then all biases).
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H;
  DType* dbx = dwh + H * H + (D - 1) * (H * H + I * H)
      + (L - 1) * ((D + 1) * H) * H * D;
  // Reserved-space pointers start at the TOP layer's slice and are
  // decremented as the loop descends.
  DType* gateN_l = rs + (L - 1) * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* dropout_random = y_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * H
      + D * I * H + D * H * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H;
  } else {
    wh_l = wh_l + (D * H) * H;
  }
  DType* dhy_l = nullptr;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * H
      + D * I * H + D * H * H;
  DType* dwh_l = nullptr;
  if (L == 1) {
    dwh_l = dwx_l + I * H;
  } else {
    dwh_l = dwx_l + (D * H) * H;
  }
  DType* dbx_l = dbx + (L - 1) * D * H * 2;
  DType* dbh_l = dbx_l + H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  index_t inputsize = I;
  // For layer l > 0 the layer "input" is the saved output of layer l - 1;
  // for l == 0 it is the original x (reassigned inside the loop).
  DType* y_tmp = y_l - T * N * H * D;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    VanillaRNNBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l,
                                         y_l, dy_l, dhy_l, gateN_l, dx_l, dhx_l, dwx_l, dwh_l,
                                         dbx_l, dbh_l, req_data, req_params, req_state, mode);
    // Undo forward dropout: zeroed positions get zero gradient, kept ones
    // are rescaled by 1 / (1 - p).
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      dropout_random = dropout_random - T * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      // This layer's input gradient becomes the next (lower) layer's dy.
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      gateN_l = gateN_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      // Layer 0 has input size `inputsize`; all others have I = D * H.
      if (l == 1) {
        wx_l = wx_l - (inputsize + H) * H * D;
        wh_l = wx_l + inputsize * H;
        dwx_l = dwx_l - (inputsize + H) * H * D;
        dwh_l = dwx_l + inputsize * H;
      } else {
        wx_l = wx_l - (I + H) * H * D;
        wh_l = wx_l + I * H;
        dwx_l = dwx_l - (I + H) * H * D;
        dwh_l = dwx_l + I * H;
      }
      dbx_l = dbx_l - D * H * 2;
      dbh_l = dbx_l + H;
    }
  }
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_RNN_IMPL_H_
|
GB_unop__identity_fc64_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_fc32)
// op(A') function: GB (_unop_tran__identity_fc64_fc32)
// C type: GxB_FC64_t
// A type: GxB_FC32_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fc64_fc32)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Apply the identity operator with an FC32 -> FC64 typecast to every
    // entry of A, writing the result into Cx.  Both loops expand to the
    // same cast-and-copy via the GB_CAST_OP macro defined above.
    int64_t k ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            // Cx [k] = (GxB_FC64_t) Ax [k]
            GB_CAST_OP (k, k) ;
        }
    }
    else
    {
        // A is bitmap; A->b has already been memcpy'd into C->b, so only
        // positions with Ab [k] set carry a value
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            GB_CAST_OP (k, k) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecasting FC32 -> FC64 with the
// identity operator applied to each entry.  The entire method body comes
// from the shared transpose template, specialized by the GB_* macros above.
GrB_Info GB (_unop_tran__identity_fc64_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,   // workspaces for the bucket transpose
const int64_t *restrict A_slice, // how A is partitioned across tasks
int nworkspaces,                 // # of workspaces to use
int nthreads                     // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hmap_mk_tid.c | /*
* Copyright (c) 2019 Ramesh Subramonian <subramonian@gmail.com>
* All rights reserved.
*
* Use is subject to license terms, as specified in the LICENSE file.
*/
//------------------------------------------------------
//START_INCLUDES
#include "hmap_common.h"
//STOP_INCLUDES
#include "_hmap_mk_tid.h"
/* Ideally, we want to distribute the work to the threads so that
* 1) they never update the same cell
* 2) they (ideally) have large contiguous regions which they own i.e.,
* only they write in that region
Dividing based on hashes gives us 1)
Dividing based on locs gives us 2)
However, since 1) is more important than 2), we went with 1)
Note that locs doesn't give you the location of a key.
It only gives you a starting point for the hunt for the location of a key
*/
//START_FOR_CDEF
int
hmap_mk_tid(
    uint32_t *hashes, // input [nkeys]
    uint32_t nkeys, // input
    uint32_t nT, // input , number of threads
    uint8_t *tids // output [nkeys]
    )
//STOP_FOR_CDEF
{
  // Assign each key to a thread by taking its hash modulo nT, so that all
  // keys that hash to the same cell are owned by the same thread (property
  // 1 in the file comment above).
  int status = 0;
  int chunk_size = 1024;
  // tids[] is uint8_t, so thread ids must fit in 0..255: nT must be <= 256.
  // nT == 0 would make the modulus below undefined (division by zero in
  // fast_div32_init).  Fail loudly instead of silently truncating tids.
  if ( ( nT == 0 ) || ( nT > 256 ) ) {
    return -1;
  }
  uint64_t divinfo = fast_div32_init(nT);
#pragma omp parallel for schedule(static, chunk_size)
  for ( uint32_t i = 0; i < nkeys; i++ ) {
    tids[i] = fast_rem32(hashes[i], nT, divinfo);
  }
  return status;
}
|
GB_add_phase0.c | //------------------------------------------------------------------------------
// GB_add_phase0: find vectors of C to compute for C=A+B or C<M>=A+B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// The eWise add of two matrices, C=A+B, C<M>=A+B, or C<!M>=A+B starts with
// this phase, which determines which vectors of C need to be computed.
// This phase is also used for GB_masker.
// On input, A and B are the two matrices being added, and M is the optional
// mask matrix (not complemented). The complemented mask is handed in GB_mask,
// not here.
// The A matrix can be sparse, hypersparse, slice, or hyperslice. The B matrix
// can only be sparse or hypersparse. See GB_wait, which can pass in A as any
// of the four formats. In this case, no mask is present.
// On output, an integer (Cnvec), a boolean (Ch_is_Mh), and up to 3 arrays are
// returned, either NULL or of size Cnvec. Let n = A->vdim be the vector
// dimension of A, B, M and C.
// Ch: the list of vectors to compute. If not NULL, Ch [k] = j is the
// kth vector in C to compute, which will become the hyperlist C->h of C.
// Note that some of these vectors may turn out to be empty, because of
// the mask, or because the vector j appeared in A or B, but is empty.
// It is pruned at the end of GB_add_phase2. If Ch is NULL then it is an
// implicit list of size n, and Ch [k] == k for all k = 0:n-1. In this
// case, C will be a standard matrix, not hypersparse. Thus, the kth
// vector is j = (Ch == NULL) ? k : Ch [k].
// Ch is freed by GB_add if phase1 fails. phase2 either frees it or
// transplants it into C.
// Ch_is_Mh: true if the mask M is present, hypersparse, and not
// complemented, false otherwise. In this case Ch is a deep copy of Mh.
// Only GB_add uses this option; it is not used by GB_masker (Ch_is_Mh
// is always false for GB_masker). This is determined by passing in
// p_Ch_is_Mh as a NULL or non-NULL pointer.
// C_to_A: if A is hypersparse, then C_to_A [k] = kA if the kth vector, j
// = (Ch == NULL) ? k : Ch [k] appears in A, as j = Ah [kA]. If j does
// not appear in A, then C_to_A [k] = -1. If A is not hypersparse, then
// C_to_A is returned as NULL.
// C_to_B: if B is hypersparse, then C_to_B [k] = kB if the kth vector, j
// = (Ch == NULL) ? k : Ch [k] appears in B, as j = Bh [kB]. If j does
// not appear in B, then C_to_B [k] = -1. If B is not hypersparse, then
// C_to_B is returned as NULL.
// C_to_M: if M is hypersparse, and Ch_is_Mh is false, then C_to_M [k] =
// kM if the kth vector, j = (Ch == NULL) ? k : Ch [k] appears in M, as j
// = Mh [kM]. If j does not appear in M, then C_to_M [k] = -1. If M is
// not hypersparse, then C_to_M is returned as NULL.
#include "GB_add.h"
//------------------------------------------------------------------------------
// GB_allocate_result
//------------------------------------------------------------------------------
static inline bool GB_allocate_result
(
    int64_t Cnvec,
    int64_t *restrict *Ch_handle,
    int64_t *restrict *C_to_M_handle,
    int64_t *restrict *C_to_A_handle,
    int64_t *restrict *C_to_B_handle
)
{
    // Allocate an int64_t array of size Cnvec for each requested output.
    // A NULL handle means that output is not requested.  On success all
    // requested arrays are allocated and true is returned; on failure every
    // array allocated so far is freed and false is returned.
    int64_t *restrict *handles [4] =
        { Ch_handle, C_to_M_handle, C_to_A_handle, C_to_B_handle } ;
    bool ok = true ;
    for (int i = 0 ; i < 4 ; i++)
    {
        if (handles [i] != NULL)
        {
            GB_MALLOC_MEMORY (*(handles [i]), Cnvec, sizeof (int64_t)) ;
            ok = ok && (*(handles [i]) != NULL) ;
        }
    }
    if (!ok)
    {
        // out of memory: free whatever was allocated (freeing a handle whose
        // allocation failed is safe; its pointer is NULL)
        for (int i = 0 ; i < 4 ; i++)
        {
            if (handles [i] != NULL)
            {
                GB_FREE_MEMORY (*(handles [i]), Cnvec, sizeof (int64_t)) ;
            }
        }
    }
    return (ok) ;
}
//------------------------------------------------------------------------------
// GB_add_phase0: find the vectors of C for C<M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB_add_phase0 // find vectors in C for C=A+B or C<M>=A+B
(
int64_t *p_Cnvec, // # of vectors to compute in C
int64_t *restrict *Ch_handle, // Ch: size Cnvec, or NULL
int64_t *restrict *C_to_M_handle, // C_to_M: size Cnvec, or NULL
int64_t *restrict *C_to_A_handle, // C_to_A: size Cnvec, or NULL
int64_t *restrict *C_to_B_handle, // C_to_B: of size Cnvec, or NULL
bool *p_Ch_is_Mh, // if true, then Ch == Mh
const GrB_Matrix M, // optional mask, may be NULL; not complemented
const GrB_Matrix A, // standard, hypersparse, slice, or hyperslice
const GrB_Matrix B, // standard or hypersparse; never a slice
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_Cnvec != NULL) ;
ASSERT (Ch_handle != NULL) ;
ASSERT (C_to_A_handle != NULL) ;
ASSERT (C_to_B_handle != NULL) ;
ASSERT_OK (GB_check (A, "A for add phase0", GB0)) ;
ASSERT_OK (GB_check (B, "B for add phase0", GB0)) ;
ASSERT_OK_OR_NULL (GB_check (M, "M for add phase0", GB0)) ;
ASSERT (A->vdim == B->vdim) ;
ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ;
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
// Clear all output handles first so that any early (out-of-memory) return
// leaves the caller's pointers in a well-defined NULL state.
int64_t *restrict Ch = NULL ;
int64_t *restrict C_to_M = NULL ;
int64_t *restrict C_to_A = NULL ;
int64_t *restrict C_to_B = NULL ;
(*Ch_handle) = NULL ;
(*C_to_A_handle) = NULL ;
(*C_to_B_handle) = NULL ;
if (C_to_M_handle != NULL)
{
(*C_to_M_handle) = NULL ;
}
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = 1 ; // nthreads depends on Cnvec, computed below
//--------------------------------------------------------------------------
// get content of M, A, and B
//--------------------------------------------------------------------------
int64_t Cnvec ;
int64_t n = A->vdim ;
int64_t Anvec = A->nvec ;
bool A_is_hyper = A->is_hyper ;
bool A_is_slice = A->is_slice ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = (A_is_hyper) ? A->h : NULL ;
const int64_t A_hfirst = A->hfirst ;
// GB_Ah(k) is the vector index of the kth vector of A: from the explicit
// hyperlist Ah if A is hypersparse, or the implicit list A_hfirst + k if A
// is a (hyper)slice.
#define GB_Ah(k) (A_is_hyper ? Ah [k] : (A_hfirst + (k)))
int64_t Bnvec = B->nvec ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
bool B_is_hyper = B->is_hyper ;
ASSERT (!B->is_slice) ;
int64_t Mnvec = 0 ;
const int64_t *restrict Mp = NULL ;
const int64_t *restrict Mh = NULL ;
bool M_is_hyper = false ;
if (M != NULL)
{
Mnvec = M->nvec ;
Mp = M->p ;
Mh = M->h ;
M_is_hyper = M->is_hyper ;
ASSERT (!M->is_slice) ;
}
// For GB_add, if M is present, hypersparse, and not complemented, then C
// will be hypersparse, and it will have the same set of vectors as M
// (Ch == Mh). For GB_masker, Ch is never equal to Mh.
bool Ch_is_Mh = (p_Ch_is_Mh != NULL) && (M != NULL && M_is_hyper) ;
//--------------------------------------------------------------------------
// find the set union of the non-empty vectors of A and B
//--------------------------------------------------------------------------
if (Ch_is_Mh)
{
//----------------------------------------------------------------------
// C is hypersparse, with the same vectors as the hypersparse M
//----------------------------------------------------------------------
// This step is done for GB_add only, not GB_masker.
// GB_wait is the only place where A may be a slice, and it does not
// use a mask. So this phase can ignore the case where A is a slice.
Cnvec = Mnvec ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
ASSERT (!A_is_slice) ;
if (!GB_allocate_result (Cnvec, &Ch, NULL,
(A_is_hyper) ? (&C_to_A) : NULL, (B_is_hyper) ? (&C_to_B) : NULL))
{
// out of memory
return (GB_OUT_OF_MEMORY) ;
}
// copy Mh into Ch. Ch is Mh so C_to_M is not needed.
GB_memcpy (Ch, Mh, Mnvec * sizeof (int64_t), nthreads) ;
// construct the mapping from C to A and B, if they are hypersparse
if (A_is_hyper || B_is_hyper)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < Cnvec ; k++)
{
int64_t j = Ch [k] ;
if (A_is_hyper)
{
// C_to_A [k] = kA if Ah [kA] == j and A(:,j) is non-empty
int64_t kA = 0, pA, pA_end ;
GB_lookup (true, Ah, Ap, &kA, Anvec-1, j, &pA, &pA_end) ;
C_to_A [k] = (pA < pA_end) ? kA : -1 ;
}
if (B_is_hyper)
{
// C_to_B [k] = kB if Bh [kB] == j and B(:,j) is non-empty
int64_t kB = 0, pB, pB_end ;
GB_lookup (true, Bh, Bp, &kB, Bnvec-1, j, &pB, &pB_end) ;
C_to_B [k] = (pB < pB_end) ? kB : -1 ;
}
}
}
}
else if ((A_is_hyper || A_is_slice) && B_is_hyper)
{
//----------------------------------------------------------------------
// A is hypersparse or a hyperslice, and B is hypersparse
//----------------------------------------------------------------------
// Ch is the set union of Ah and Bh. This is handled with a parallel
// merge, since Ah and Bh are both sorted lists.
//----------------------------------------------------------------------
// phase 0: create the tasks
//----------------------------------------------------------------------
double work = GB_IMIN (Anvec + Bnvec, n) ;
nthreads = GB_nthreads (work, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
ntasks = GB_IMIN (ntasks, work) ;
// kA_start [t] and kB_start [t] give task t its slice of Ah and Bh;
// kC_start [t] later becomes the offset of task t's output in Ch.
// These are VLAs; ntasks is bounded by 64 * nthreads.
int64_t kA_start [ntasks+1] ;
int64_t kB_start [ntasks+1] ;
int64_t kC_start [ntasks+1] ;
kA_start [0] = (Anvec == 0) ? -1 : 0 ;
kB_start [0] = (Bnvec == 0) ? -1 : 0 ;
kA_start [ntasks] = (Anvec == 0) ? -1 : Anvec ;
kB_start [ntasks] = (Bnvec == 0) ? -1 : Bnvec ;
for (int taskid = 1 ; taskid < ntasks ; taskid++)
{
// create tasks: A and B are both hyper
double target_work = ((ntasks-taskid) * work) / ntasks ;
GB_slice_vector (NULL, NULL,
&(kA_start [taskid]), &(kB_start [taskid]),
0, 0, NULL, // Mi not present
0, Anvec, Ah, A_hfirst, // Ah, explicit or implicit list
0, Bnvec, Bh, // Bh, explicit list
n, // Ah and Bh have dimension n
target_work) ;
}
//----------------------------------------------------------------------
// phase 1: count the entries in the result of each task
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule (dynamic,1)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
// merge Ah and Bh into Ch
int64_t kA = kA_start [taskid] ;
int64_t kB = kB_start [taskid] ;
int64_t kA_end = kA_start [taskid+1] ;
int64_t kB_end = kB_start [taskid+1] ;
int64_t kC = 0 ;
for ( ; kA < kA_end && kB < kB_end ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// jA appears in A but not B
kA++ ;
}
else if (jB < jA)
{
// jB appears in B but not A
kB++ ;
}
else
{
// j = jA = jB appears in both A and B
kA++ ;
kB++ ;
}
}
// leftover entries of either list each contribute one vector to C
kC_start [taskid] = kC + (kA_end - kA) + (kB_end - kB) ;
}
//----------------------------------------------------------------------
// phase 1b: cumulative sum of entries for each task
//----------------------------------------------------------------------
GB_cumsum (kC_start, ntasks, NULL, 1) ;
Cnvec = kC_start [ntasks] ;
//----------------------------------------------------------------------
// allocate the result
//----------------------------------------------------------------------
// C will be hypersparse, so Ch is allocated. The mask M is ignored
// for computing Ch. Ch is the set union of Ah and Bh.
if (!GB_allocate_result (Cnvec, &Ch,
(M_is_hyper) ? (&C_to_M) : NULL, &C_to_A, &C_to_B))
{
// out of memory
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// phase 2: compute the result
//----------------------------------------------------------------------
// Same merge as phase 1, but now each task writes its output into its
// own disjoint region Ch [kC_start [taskid] ...].
#pragma omp parallel for num_threads(nthreads) schedule (dynamic,1)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
// merge Ah and Bh into Ch
int64_t kA = kA_start [taskid] ;
int64_t kB = kB_start [taskid] ;
int64_t kC = kC_start [taskid] ;
int64_t kA_end = kA_start [taskid+1] ;
int64_t kB_end = kB_start [taskid+1] ;
// merge Ah and Bh into Ch
for ( ; kA < kA_end && kB < kB_end ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// append jA to Ch
Ch [kC] = jA ;
C_to_A [kC] = kA++ ;
C_to_B [kC] = -1 ; // jA does not appear in B
}
else if (jB < jA)
{
// append jB to Ch
Ch [kC] = jB ;
C_to_A [kC] = -1 ; // jB does not appear in A
C_to_B [kC] = kB++ ;
}
else
{
// j appears in both A and B; append it to Ch
Ch [kC] = jA ;
C_to_A [kC] = kA++ ;
C_to_B [kC] = kB++ ;
}
}
if (kA < kA_end)
{
// B is exhausted but A is not
for ( ; kA < kA_end ; kA++, kC++)
{
// append jA to Ch
int64_t jA = GB_Ah (kA) ;
Ch [kC] = jA ;
C_to_A [kC] = kA ;
C_to_B [kC] = -1 ;
}
}
else if (kB < kB_end)
{
// A is exhausted but B is not
for ( ; kB < kB_end ; kB++, kC++)
{
// append jB to Ch
int64_t jB = Bh [kB] ;
Ch [kC] = jB ;
C_to_A [kC] = -1 ;
C_to_B [kC] = kB ;
}
}
ASSERT (kC == kC_start [taskid+1]) ;
}
//----------------------------------------------------------------------
// check result via a sequential merge
//----------------------------------------------------------------------
#ifdef GB_DEBUG
// merge Ah and Bh into Ch
int64_t kA = 0 ;
int64_t kB = 0 ;
int64_t kC = 0 ;
for ( ; kA < Anvec && kB < Bnvec ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// append jA to Ch
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ; kA++ ;
ASSERT (C_to_B [kC] == -1) ; // jA does not appear in B
}
else if (jB < jA)
{
// append jB to Ch
ASSERT (Ch [kC] == jB) ;
ASSERT (C_to_A [kC] == -1) ; // jB does not appear in A
ASSERT (C_to_B [kC] == kB) ; kB++ ;
}
else
{
// j appears in both A and B; append it to Ch
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ; kA++ ;
ASSERT (C_to_B [kC] == kB) ; kB++ ;
}
}
if (kA < Anvec)
{
// B is exhausted but A is not
for ( ; kA < Anvec ; kA++, kC++)
{
// append jA to Ch
int64_t jA = GB_Ah (kA) ;
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ;
ASSERT (C_to_B [kC] == -1) ;
}
}
else if (kB < Bnvec)
{
// A is exhausted but B is not
for ( ; kB < Bnvec ; kB++, kC++)
{
// append jB to Ch
int64_t jB = Bh [kB] ;
ASSERT (Ch [kC] == jB) ;
ASSERT (C_to_A [kC] == -1) ;
ASSERT (C_to_B [kC] == kB) ;
}
}
ASSERT (kC == Cnvec) ;
#endif
}
else if ((A_is_hyper || A_is_slice) && !B_is_hyper)
{
//----------------------------------------------------------------------
// A is hypersparse, B is standard
//----------------------------------------------------------------------
// C will be standard. Construct the C_to_A mapping.
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, &C_to_A, NULL))
{
// out of memory
return (GB_OUT_OF_MEMORY) ;
}
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t j = 0 ; j < n ; j++)
{
C_to_A [j] = -1 ;
}
// scatter Ah into C_to_A
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t kA = 0 ; kA < Anvec ; kA++)
{
int64_t jA = GB_Ah (kA) ;
C_to_A [jA] = kA ;
}
}
else if (!(A_is_hyper || A_is_slice) && B_is_hyper)
{
//----------------------------------------------------------------------
// A is standard, B is hypersparse
//----------------------------------------------------------------------
// C will be standard. Construct the C_to_B mapping.
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, NULL, &C_to_B))
{
// out of memory
return (GB_OUT_OF_MEMORY) ;
}
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t j = 0 ; j < n ; j++)
{
C_to_B [j] = -1 ;
}
// scatter Bh into C_to_B
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t kB = 0 ; kB < Bnvec ; kB++)
{
int64_t jB = Bh [kB] ;
C_to_B [jB] = kB ;
}
}
else
{
//----------------------------------------------------------------------
// A and B are both standard
//----------------------------------------------------------------------
// C will be standard
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, NULL, NULL))
{
// out of memory
return (GB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// construct C_to_M if needed
//--------------------------------------------------------------------------
if (C_to_M != NULL)
{
if (Ch != NULL)
{
// C is hypersparse
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < Cnvec ; k++)
{
int64_t j = Ch [k] ;
// C_to_M [k] = kM if Mh [kM] == j and M(:,j) is non-empty
int64_t kM = 0, pM, pM_end ;
GB_lookup (true, Mh, Mp, &kM, Mnvec-1, j, &pM, &pM_end) ;
C_to_M [k] = (pM < pM_end) ? kM : -1 ;
}
}
else
{
// this case can occur only if M is present, complemented, and
// hypersparse, and C is standard.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t j = 0 ; j < n ; j++)
{
C_to_M [j] = -1 ;
}
// scatter Mh into C_to_M
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t kM = 0 ; kM < Mnvec ; kM++)
{
int64_t jM = Mh [kM] ;
C_to_M [jM] = kM ;
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
(*p_Cnvec ) = Cnvec ;
if (p_Ch_is_Mh != NULL)
{
// return Ch_is_Mh to GB_add. For GB_masker, Ch is never Mh.
(*p_Ch_is_Mh) = Ch_is_Mh ;
}
(*Ch_handle ) = Ch ;
(*C_to_A_handle) = C_to_A ;
(*C_to_B_handle) = C_to_B ;
if (C_to_M_handle != NULL)
{
(*C_to_M_handle) = C_to_M ;
}
//--------------------------------------------------------------------------
// The code below describes what the output contains:
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
ASSERT (A != NULL) ; // A and B are always present
ASSERT (B != NULL) ;
int64_t jlast = -1 ;
for (int64_t k = 0 ; k < Cnvec ; k++)
{
// C(:,j) is in the list, as the kth vector
int64_t j ;
if (Ch == NULL)
{
// C will be constructed as standard sparse
j = k ;
}
else
{
// C will be constructed as hypersparse
j = Ch [k] ;
}
// vectors j in Ch are sorted, and in the range 0:n-1
ASSERT (j >= 0 && j < n) ;
ASSERT (j > jlast) ;
jlast = j ;
// see if A (:,j) exists
if (C_to_A != NULL)
{
// A is hypersparse, or a slice
ASSERT (A->is_hyper || A->is_slice) ;
int64_t kA = C_to_A [k] ;
ASSERT (kA >= -1 && kA < A->nvec) ;
if (kA >= 0)
{
int64_t jA = GB_Ah (kA) ;
ASSERT (j == jA) ;
}
}
else
{
// A is in standard sparse form
// C_to_A exists only if A is hypersparse
ASSERT (!(A->is_hyper || A->is_slice)) ;
}
// see if B (:,j) exists
if (C_to_B != NULL)
{
// B is hypersparse
ASSERT (B->is_hyper) ;
int64_t kB = C_to_B [k] ;
ASSERT (kB >= -1 && kB < B->nvec) ;
if (kB >= 0)
{
int64_t jB = B->h [kB] ;
ASSERT (j == jB) ;
}
}
else
{
// B is in standard sparse form
// C_to_B exists only if B is hypersparse
ASSERT (!B->is_hyper) ;
}
// see if M (:,j) exists
if (Ch_is_Mh)
{
// Ch is the same as Mh
ASSERT (M != NULL) ;
ASSERT (M->is_hyper) ;
ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ;
ASSERT (C_to_M == NULL) ;
}
else if (C_to_M != NULL)
{
// M is present and hypersparse
ASSERT (M != NULL) ;
ASSERT (M->is_hyper) ;
int64_t kM = C_to_M [k] ;
ASSERT (kM >= -1 && kM < M->nvec) ;
if (kM >= 0)
{
int64_t jM = M->h [kM] ;
ASSERT (j == jM) ;
}
}
else
{
// M is not present, or in standard form
ASSERT (M == NULL || !(M->is_hyper)) ;
}
}
#endif
return (GrB_SUCCESS) ;
}
|
GB_binop__ne_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint16)
// A*D function (colscale): GB (_AxD__ne_uint16)
// D*A function (rowscale): GB (_DxB__ne_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint16)
// C=scalar+B GB (_bind1st__ne_uint16)
// C=scalar+B' GB (_bind1st_tran__ne_uint16)
// C=A+scalar GB (_bind2nd__ne_uint16)
// C=A'+scalar GB (_bind2nd_tran__ne_uint16)
// C type: bool
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT16 || GxB_NO_NE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// This kernel is compiled out (#if 0) for this file, presumably because
// NE is not in the list of ops above — confirm against the generator.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// shared template, specialized by the GB_BINOP macro (cij = aij != bij).
void GB (_Cdense_ewise3_noaccum__ne_uint16)
(
GrB_Matrix C,           // output, already allocated as dense
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads      // # of OpenMP threads to use
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix.  For this operator
// the accumulation body is compiled out (#if 0 below), so the function is a
// stub that reports success without modifying C.
GrB_Info GB (_Cdense_accumB__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix.  As with _Cdense_accumB
// above, the body is compiled out (#if 0 below), so this is a stub that
// reports success without modifying C.
GrB_Info GB (_Cdense_accumb__ne_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,     // the scalar b, passed as untyped bytes
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j), applying
// the NE operator; Cx is bool because NE returns bool.
GrB_Info GB (_AxD__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,     // diagonal matrix
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i), applying
// the NE operator; Cx is bool because NE returns bool.
GrB_Info GB (_DxB__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,     // diagonal matrix
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the NE operator.  The heavy
// lifting is in GB_add_template.c, specialized by the macros above.
GrB_Info GB (_AaddB__ne_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,             // optional mask, may be NULL
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,       // if true, alpha/beta fill absent entries
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
// for eWiseUnion only: typed copies of the alpha/beta fill scalars
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ne_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// all work (masking, task scheduling, z = (aij != bij)) is done by the
// emult_08 meta template, parameterized by the macros in this file
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// NE is symmetric in its arguments, so this branch is taken here.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the emult_04 template iterates over the sparse/hyper mask M and reads
// A and B (both bitmap/full) at the mask's positions
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C is computed as a bitmap matrix; the template handles all mask cases
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_uint16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (x != Bx [p]) for every entry present in B.
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            uint16_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x != bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_uint16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (Ax [p] != y) for every entry present in A.
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            uint16_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij != y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP is expanded inside GB_unop_transpose.c for each entry:
// cij = (x != aij)
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the bound first argument x is fixed for the whole transpose
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP is expanded inside GB_unop_transpose.c for each entry:
// cij = (aij != y)
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the bound second argument y is fixed for the whole transpose
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
array_multiply_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
/*
 * Benchmark: elementwise scaling of a large int array with OpenMP.
 *
 * Fixes over the original:
 *  - the input array was read without ever being initialized (malloc'd
 *    memory holds indeterminate values — undefined behavior);
 *  - malloc results were not checked before use;
 *  - the elapsed time was accumulated in a float, which cannot represent
 *    nanosecond totals of this magnitude without large rounding error;
 *  - allocations were never freed.
 */
int main(int argc, char **argv) {
    struct timespec ts_start, ts_end;
    const int size = 100000000;   /* 1e8 elements */
    const int multiplier = 2;
    int *a, *c;
    int i;
    double time_total;

    (void) argc;
    (void) argv;

    /* Allocate memory for arrays; bail out instead of dereferencing NULL. */
    a = malloc(size * sizeof *a);
    c = malloc(size * sizeof *c);
    if (a == NULL || c == NULL) {
        fprintf(stderr, "Failed to allocate %d-element arrays\n", size);
        free(a);
        free(c);
        return EXIT_FAILURE;
    }

    /* Initialize the input before reading it. */
    for (i = 0; i < size; i++) {
        a[i] = i;
    }

    /* Get start time */
    clock_gettime(CLOCK_MONOTONIC, &ts_start);

    #pragma omp parallel for
    for (i = 0; i < size; i++) {
        c[i] = multiplier * a[i];
    }

    /* Get end time */
    clock_gettime(CLOCK_MONOTONIC, &ts_end);
    time_total = (ts_end.tv_sec - ts_start.tv_sec) * 1e9 +
                 (ts_end.tv_nsec - ts_start.tv_nsec);
    printf("Total time is %f ms\n", time_total / 1e6);

    free(a);
    free(c);
    return EXIT_SUCCESS;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/channel.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/policy.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/registry.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declaractions.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declaractions.
*/
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declaractions.
*/
typedef struct _ChannelInfo
{
short int
type;
size_t
size;
} ChannelInfo;
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[256],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD()() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /* A PSD file begins with the 4-byte signature "8BPS". */
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op)
{
  /*
    Map an ImageMagick composite operator to the 4-byte PSD blend-mode
    key; operators without a PSD equivalent fall back to "norm".
  */
  switch (op)
  {
    case ColorBurnCompositeOp: return("idiv");
    case ColorDodgeCompositeOp: return("div ");
    case ColorizeCompositeOp: return("colr");
    case DarkenCompositeOp: return("dark");
    case DifferenceCompositeOp: return("diff");
    case DissolveCompositeOp: return("diss");
    case ExclusionCompositeOp: return("smud");
    case HardLightCompositeOp: return("hLit");
    case HardMixCompositeOp: return("hMix");
    case HueCompositeOp: return("hue ");
    case LightenCompositeOp: return("lite");
    case LinearBurnCompositeOp: return("lbrn");
    case LinearDodgeCompositeOp: return("lddg");
    case LinearLightCompositeOp: return("lLit");
    case LuminizeCompositeOp: return("lum ");
    case MultiplyCompositeOp: return("mul ");
    case OverCompositeOp: return("norm");
    case OverlayCompositeOp: return("over");
    case PinLightCompositeOp: return("pLit");
    case SaturateCompositeOp: return("sat ");
    case ScreenCompositeOp: return("scrn");
    case SoftLightCompositeOp: return("sLit");
    case VividLightCompositeOp: return("vLit");
    default: return("norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image, ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
// Only meaningful for sRGB images that actually carry an alpha channel.
if (image->matte == MagickFalse || image->colorspace != sRGBColorspace)
return(MagickTrue);
// The correction runs unless 'psd:alpha-unblend' is explicitly false/off.
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringNotFalse(option) == MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
// gamma is the normalized alpha; skip fully transparent (0.0) and
// fully opaque (1.0) pixels, which need no unblending.
gamma=QuantumScale*GetPixelAlpha(q);
if (gamma != 0.0 && gamma != 1.0)
{
// Invert "composite over white": c' = (c - (1-a)*white) / a.
SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma);
SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma);
SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma);
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /* Translate the on-disk PSD compression tag to ImageMagick's enum. */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
// Fully opaque layers need no adjustment.
if (opacity == QuantumRange)
return(MagickTrue);
image->matte=MagickTrue;
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
// Forward: scale pixel alpha by the layer opacity.
// Revert: divide it back out (only when opacity is nonzero).
if (revert == MagickFalse)
SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity)));
else if (opacity > 0)
SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/
(MagickRealType) opacity)));
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
MagickPixelPacket
color;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
// Build a full-size mask image: fill with the mask's background value,
// then composite the (possibly smaller, offset) layer mask onto it.
complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->matte=MagickTrue;
GetMagickPixelPacket(complete_mask,&color);
color.red=background;
SetImageColor(complete_mask,&color);
status=CompositeImage(complete_mask,OverCompositeOp,mask,
mask->page.x-image->page.x,mask->page.y-image->page.y);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
image->matte=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register PixelPacket
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=GetPixelAlpha(q);
intensity=GetPixelIntensity(complete_mask,p);
// Forward: attenuate alpha by the mask intensity.
// Revert: divide the intensity back out (only when nonzero).
if (revert == MagickFalse)
SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha)));
else if (intensity > 0)
SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange));
q++;
p++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
/*
  Stash the layer's opacity-mask image in the image registry under a random
  key, and record that key as the 'psd:opacity-mask' artifact so the writer
  can retrieve it later.

  Fix: the original requested only 2+1 random bytes from GetRandomKey but
  then wrote key[8] and key[9], a heap buffer overflow.  Request enough
  bytes for indices 0..9.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /* 10 bytes: 8 random bytes, the mask background, and the terminator. */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=layer_info->mask.background;
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
// Decode a PackBits-style RLE stream into raw pixel bytes.  For depths
// below 8 each decoded byte is expanded into multiple pixel values.
// Returns the number of pixels written (may be short if input runs out).
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
const unsigned char *compact_pixels,const ssize_t depth,
const size_t number_pixels,unsigned char *pixels)
{
// Stop (returning pixels written so far) when the compressed input is
// exhausted...
#define CheckNumberCompactPixels \
if (packets == 0) \
return(i); \
packets--
// ...or when the next expansion would overrun the output buffer.
#define CheckNumberPixels(count) \
if (((ssize_t) i + count) > (ssize_t) number_pixels) \
return(i); \
i+=count
int
pixel;
register ssize_t
i,
j;
size_t
length;
ssize_t
packets;
packets=(ssize_t) number_compact_pixels;
for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
{
packets--;
length=(size_t) (*compact_pixels++);
// A control byte of 128 is a no-op in PackBits.
if (length == 128)
continue;
if (length > 128)
{
// Run: 257-length repetitions of the next input byte.
length=256-length+1;
CheckNumberCompactPixels;
pixel=(*compact_pixels++);
for (j=0; j < (ssize_t) length; j++)
{
switch (depth)
{
case 1:
{
// 1-bit: expand each bit, inverted (1 -> black/0, 0 -> white/255).
CheckNumberPixels(8);
*pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
// 2-bit: four pixels per byte.
CheckNumberPixels(4);
*pixels++=(unsigned char) ((pixel >> 6) & 0x03);
*pixels++=(unsigned char) ((pixel >> 4) & 0x03);
*pixels++=(unsigned char) ((pixel >> 2) & 0x03);
*pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
break;
}
case 4:
{
// 4-bit: two pixels per byte.
CheckNumberPixels(2);
*pixels++=(unsigned char) ((pixel >> 4) & 0xff);
*pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
break;
}
default:
{
// 8-bit (and the depth==1 RLE sentinel path): copy the byte.
CheckNumberPixels(1);
*pixels++=(unsigned char) pixel;
break;
}
}
}
continue;
}
// Literal: copy the next length+1 input bytes (expanded per depth).
length++;
for (j=0; j < (ssize_t) length; j++)
{
CheckNumberCompactPixels;
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(*compact_pixels >> 6) & 0x03;
*pixels++=(*compact_pixels >> 4) & 0x03;
*pixels++=(*compact_pixels >> 2) & 0x03;
*pixels++=(*compact_pixels & 0x03) & 0x03;
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(*compact_pixels >> 4) & 0xff;
*pixels++=(*compact_pixels & 0x0f) & 0xff;
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(*compact_pixels);
break;
}
}
compact_pixels++;
}
}
return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    i;

  /* Release each layer's image, mask image and extra-info blob, then the
     layer array itself.  Always returns NULL. */
  for (i=0; i < number_layers; i++)
  {
    LayerInfo
      *layer;

    layer=layer_info+i;
    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(Image *image)
{
  /*
    Bytes per sample: 2 when the depth exceeds 8 bits, or when a
    colormapped image needs wide indexes (more than 256 entries);
    1 otherwise.
  */
  if (image->depth > 8)
    return(2);
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  return(1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /* Version-1 (PSD) files store sizes as 32-bit values; PSB uses 64-bit. */
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  /* Monochrome rows pack 8 pixels per byte; all other depths store one
     sample per column. */
  samples=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(samples*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
  /*
    Human-readable name of a PSD color mode (for logging).  Modes 5 and 6
    are unassigned in the PSD format, so their slots are NULL and map to
    "unknown", as does anything out of range.
  */
  static const char
    *const mode_names[] =
    {
      "Bitmap", "Grayscale", "Indexed", "RGB", "CMYK",
      (const char *) NULL, (const char *) NULL,
      "Multichannel", "Duotone", "L*A*B"
    };

  if (((int) type >= 0) && ((int) type <= (int) LabMode) &&
      (mode_names[(int) type] != (const char *) NULL))
    return(mode_names[(int) type]);
  return("unknown");
}
/*
  Walk the "8BIM" image-resource blocks: store the whole section as the
  image's 8bim profile, extract the resolution info (id 0x03ed) and the
  merged-image marker (id 0x0421).  Malformed blocks abort the walk.

  Fix: the BlobToStringInfo result was used unchecked; on allocation
  failure SetStringInfoDatum would dereference NULL.
*/
static void ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image)
{
  const unsigned char
    *p;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return;
  profile=BlobToStringInfo((const void *) NULL,length);
  if (profile == (StringInfo *) NULL)
    return;
  SetStringInfoDatum(profile,blocks);
  (void) SetImageProfile(image,"8bim",profile);
  profile=DestroyStringInfo(profile);
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    /* Pascal name: length byte + name, padded so the total is even. */
    p=PushCharPixel(p,&name_length);
    if (name_length % 2 == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      return;
    p=PushLongPixel(MSBEndian,p,&count);
    if ((p+count) > (blocks+length))
      return;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MaxTextExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (count < 16)
          return;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->x_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->x_resolution);
        (void) SetImageProperty(image,"tiff:XResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->y_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->y_resolution);
        (void) SetImageProperty(image,"tiff:YResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version info: byte 4 zero means no merged (composite) image. */
        if ((count > 3) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
  return;
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *q;

  /* Reverse the length-byte buffer in place; big-endian images are stored
     in the expected order and need no fixup. */
  if (image->endian == MSBEndian)
    return;
  q=p+length-1;
  while (p < q)
  {
    char
      swap;

    swap=(*p);
    *p++=(*q);
    *q--=swap;
  }
}
// Store one decoded sample into the pixel/index arrays.  'type' selects
// the destination channel: 0..3 are the file's color channels, -1 is the
// transparency channel, -2 a layer mask channel.
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,
PixelPacket *q,IndexPacket *indexes,ssize_t x)
{
if (image->storage_class == PseudoClass)
{
PixelPacket
*color;
// Channel 0 carries the colormap index (8- or 16-bit).
if (type == 0)
{
if (packet_size == 1)
SetPixelIndex(indexes+x,ScaleQuantumToChar(pixel));
else
SetPixelIndex(indexes+x,ScaleQuantumToShort(pixel));
}
color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
GetPixelIndex(indexes+x));
// A second channel (if any) holds alpha for the colormap entry.
if ((type == 0) && (channels > 1))
return;
else
SetPixelAlpha(color,pixel);
SetPixelRGBO(q,color);
return;
}
switch (type)
{
case -1:
{
SetPixelAlpha(q,pixel);
break;
}
case -2:
case 0:
{
SetPixelRed(q,pixel);
// Grayscale images (or mask channels) replicate into green/blue.
if (channels < 3 || type == -2)
{
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
}
break;
}
case 1:
{
SetPixelGreen(q,pixel);
break;
}
case 2:
{
SetPixelBlue(q,pixel);
break;
}
case 3:
{
// Channel 3 is black for CMYK; otherwise it is alpha (if enabled).
if (image->colorspace == CMYKColorspace)
SetPixelIndex(indexes+x,pixel);
else
if (image->matte != MagickFalse)
SetPixelAlpha(q,pixel);
break;
}
case 4:
{
// Channel 4: alpha for CMYK images; ignored for RGB-compatible
// images that already used channel 3 as alpha.
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->matte != MagickFalse)
SetPixelAlpha(q,pixel);
break;
}
}
}
// Convert one decoded row of samples into authentic pixels of 'image'.
// 'type' identifies the channel (see SetPSDPixel); returns the result of
// SyncAuthenticPixels, or MagickFalse when the row cannot be fetched.
static MagickBooleanType ReadPSDChannelPixels(Image *image,const size_t channels,
const size_t row,const ssize_t type,const unsigned char *pixels,
ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
indexes=GetAuthenticIndexQueue(image);
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
// Samples are 1 byte, or 2 bytes big-endian for deep images.
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q++,indexes,x);
}
else
{
// 1-bit data: each byte expands to up to 8 pixels; set bits map to
// 0 (black) and clear bits to QuantumRange (white).
ssize_t
bit,
number_bits;
number_bits=image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit=0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
}
// The inner loop advanced x one past the last pixel written; step
// back unless the row is complete.
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    count,
    row_size;

  ssize_t
    y;

  unsigned char
    *pixels;

  /*
    Uncompressed channel: read one raw row at a time from the blob and
    hand it to ReadPSDChannelPixels.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    count=ReadBlob(image,row_size,pixels);
    if (count != row_size)
      {
        /* Short read: the file ended before the channel was complete. */
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    y;

  /*
    Read the per-row compressed byte counts that precede the RLE data:
    16-bit counts in version-1 (PSD) files, 32-bit in version-2 (PSB).
    Returns NULL when the table cannot be allocated.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return((MagickOffsetType *) NULL);
  for (y=0; y < (ssize_t) size; y++)
    sizes[y]=(MagickOffsetType) ((psd_info->version == 1) ?
      ReadBlobShort(image) : ReadBlobLong(image));
  return sizes;
}
// Read an RLE-compressed channel: each row's compressed length is given
// by sizes[], the bytes are inflated with DecodePSDPixels and converted
// with ReadPSDChannelPixels.
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
length,
row_size;
ssize_t
count,
y;
unsigned char
*compact_pixels,
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RLE compressed");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
// Size the compressed-row buffer for the largest row in the channel.
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
if ((MagickOffsetType) length < sizes[y])
length=(size_t) sizes[y];
// A compressed row much larger than the raw row indicates corruption.
if (length > (row_size+512))
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
}
compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
if (compact_pixels == (unsigned char *) NULL)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=MagickFalse;
count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
if (count != (ssize_t) sizes[y])
break;
// depth==1 passes the 123456 sentinel so DecodePSDPixels copies raw
// bytes (the bit expansion happens later in ReadPSDChannelPixels).
count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
(ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
exception);
if (status == MagickFalse)
break;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  /*
    Read one ZIP-compressed channel: inflate compact_size bytes from the
    blob into a rows*row_size pixel buffer, optionally undo the per-scanline
    delta prediction (ZipWithPrediction), then hand each row to
    ReadPSDChannelPixels().  Returns MagickTrue on success.
  */
  MagickBooleanType
    status;
  register unsigned char
    *p;
  size_t
    count,
    length,
    packet_size,
    row_size;
  ssize_t
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  z_stream
    stream;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* count is the size in bytes of the fully decompressed channel */
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  /*
    Inflate the whole channel in one pass; any zlib result other than Z_OK
    or a clean Z_STREAM_END aborts with MagickFalse.
  */
  ResetMagickMemory(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;
      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo horizontal delta prediction: each sample was stored as the
        difference from its left neighbor.  When packet_size == 2 the delta
        is applied across the two-byte sample with carry from the low byte.
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  /* Push the decompressed scanlines into the pixel cache row by row. */
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  /*
    Read one channel of a layer, dispatching on the compression marker the
    caller already consumed (2 bytes, hence the size-2 arithmetic below).
    Channel types < -1 carry layer-mask data; a supported user mask is
    decoded into a cloned image stored in layer_info->mask.image.
  */
  Image
    *channel_image,
    *mask;
  MagickOffsetType
    offset;
  MagickBooleanType
    status;
  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
    (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;
      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* Unsupported mask: skip its data and report success. */
          SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          mask->matte=MagickFalse;
          /* Decode the channel into the mask clone instead of the layer. */
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;
      /* RLE data is preceded by a per-scanline compressed-size table. */
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /*
    Always reposition to the end of this channel's data so a decode failure
    cannot desynchronize the channels that follow.
  */
  SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  layer_info->mask.image=mask;
  return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  /*
    Decode a single layer: configure its image (compose operator,
    colorspace, psd:layer.* artifacts, label), read every channel, then
    apply the layer opacity and any opacity mask.  Returns MagickTrue on
    success.
  */
  char
    message[MaxTextExtent];
  MagickBooleanType
    status;
  PSDCompressionType
    compression;
  ssize_t
    j;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    {
      /* Hidden layers are kept but excluded from compositing. */
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    SetImageColorspace(layer_info->image,CMYKColorspace);
  else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
    (psd_info->mode == GrayscaleMode))
    SetImageColorspace(layer_info->image,GRAYColorspace);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
  status=MagickTrue;
  /* Each channel is preceded by its own 16-bit compression marker. */
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j,
      compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  /* CMYK channel data is stored inverted; undo it. */
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if (status != MagickFalse && layer_info->mask.image != (Image *) NULL)
    {
      const char
        *option;
      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  /*
    Parse the layer-and-mask section: the layer records (geometry, channel
    table, blend key, mask info, names) are read first, then, unless the
    caller is only pinging, the per-layer channel data.  Decoded layers are
    linked into the image list behind the base image.
  */
  char
    type[4];
  LayerInfo
    *layer_info;
  MagickSizeType
    size;
  MagickBooleanType
    status;
  register ssize_t
    i;
  ssize_t
    count,
    j,
    number_layers;
  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      ReversePSDString(image,type,4);
      status=MagickFalse;
      if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          /* "Lr16" introduces 16-bit layer data; anything else is done. */
          count=ReadBlob(image,4,(unsigned char *) type);
          ReversePSDString(image,type,4);
          if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  status=MagickTrue;
  if (size != 0)
    {
      layer_info=(LayerInfo *) NULL;
      number_layers=(short) ReadBlobShort(image);
      if (number_layers < 0)
        {
          /*
            The first alpha channel in the merged result contains the
            transparency data for the merged result.
          */
          number_layers=MagickAbsoluteValue(number_layers);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " negative layer count corrected for");
          image->matte=MagickTrue;
        }
      /*
        We only need to know if the image has an alpha channel
      */
      if (skip_layers != MagickFalse)
        return(MagickTrue);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image contains %.20g layers",(double) number_layers);
      if (number_layers == 0)
        ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
          image->filename);
      layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
        sizeof(*layer_info));
      if (layer_info == (LayerInfo *) NULL)
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " allocation of LayerInfo failed");
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      /* Zeroing also guarantees the name buffers are NUL padded. */
      (void) ResetMagickMemory(layer_info,0,(size_t) number_layers*
        sizeof(*layer_info));
      for (i=0; i < number_layers; i++)
      {
        ssize_t
          x,
          y;
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " reading layer #%.20g",(double) i+1);
        /* Layer rectangle is stored as top, left, bottom, right. */
        layer_info[i].page.y=ReadBlobSignedLong(image);
        layer_info[i].page.x=ReadBlobSignedLong(image);
        y=ReadBlobSignedLong(image);
        x=ReadBlobSignedLong(image);
        layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
        layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
        layer_info[i].channels=ReadBlobShort(image);
        if (layer_info[i].channels > MaxPSDChannels)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
              image->filename);
          }
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
            (double) layer_info[i].page.x,(double) layer_info[i].page.y,
            (double) layer_info[i].page.height,(double)
            layer_info[i].page.width,(double) layer_info[i].channels);
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
          layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
            image);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
              (double) layer_info[i].channel_info[j].type,
              (double) layer_info[i].channel_info[j].size);
        }
        count=ReadBlob(image,4,(unsigned char *) type);
        ReversePSDString(image,type,4);
        if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer type was %.4s instead of 8BIM", type);
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        (void) ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
        ReversePSDString(image,layer_info[i].blendkey,4);
        layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
          ReadBlobByte(image));
        layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
        layer_info[i].flags=(unsigned char) ReadBlobByte(image);
        /* Flag bit 0x02 marks the layer as hidden. */
        layer_info[i].visible=!(layer_info[i].flags & 0x02);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
            layer_info[i].blendkey,(double) layer_info[i].opacity,
            layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
            layer_info[i].visible ? "true" : "false");
        (void) ReadBlobByte(image); /* filler */
        size=ReadBlobLong(image);
        if (size != 0)
          {
            MagickSizeType
              combined_length,
              length;
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer contains additional info");
            length=ReadBlobLong(image);
            combined_length=length+4;
            if (length != 0)
              {
                /*
                  Layer mask info.
                */
                layer_info[i].mask.page.y=ReadBlobSignedLong(image);
                layer_info[i].mask.page.x=ReadBlobSignedLong(image);
                layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-
                  layer_info[i].mask.page.y);
                layer_info[i].mask.page.width=(size_t) (ReadBlobSignedLong(image)-
                  layer_info[i].mask.page.x);
                layer_info[i].mask.background=(unsigned char) ReadBlobByte(
                  image);
                layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
                if (!(layer_info[i].mask.flags & 0x01))
                  {
                    /* Mask position is relative to the layer, not canvas. */
                    layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                      layer_info[i].page.y;
                    layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                      layer_info[i].page.x;
                  }
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                    (double) layer_info[i].mask.page.x,(double)
                    layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
                    (double) layer_info[i].mask.page.height,(double)
                    ((MagickOffsetType) length)-18);
                /*
                  Skip over the rest of the layer mask information.  The 18
                  bytes parsed above must fit in the declared length: the
                  subtraction below is unsigned and a length of 1..17 would
                  underflow into a huge skip count.
                */
                if (length < 18)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "ImproperImageHeader",image->filename);
                  }
                if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
                      image->filename);
                  }
              }
            length=ReadBlobLong(image);
            combined_length+=length+4;
            if (length != 0)
              {
                /*
                  Layer blending ranges info.
                */
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer blending ranges: length=%.20g",(double)
                    ((MagickOffsetType) length));
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /*
              Layer name (Pascal string, padded with its count byte to a
              multiple of 4; the buffer was pre-zeroed above, so the string
              is always NUL terminated).
            */
            length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
            combined_length+=length+1;
            if (length > 0)
              (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
            layer_info[i].name[length]='\0';
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer name: %s",layer_info[i].name);
            if ((length % 4) != 0)
              {
                length=4-(length % 4);
                combined_length+=length;
                /* Skip over the padding of the layer name */
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /*
              Whatever remains of this record is opaque "additional info",
              preserved verbatim for the writer.
            */
            length=(MagickSizeType) size-combined_length;
            if (length > 0)
              {
                unsigned char
                  *info;
                if (length > GetBlobSize(image))
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "InsufficientImageDataInFile",image->filename);
                  }
                layer_info[i].info=AcquireStringInfo((const size_t) length);
                info=GetStringInfoDatum(layer_info[i].info);
                (void) ReadBlob(image,(const size_t) length,info);
              }
          }
      }
      for (i=0; i < number_layers; i++)
      {
        if ((layer_info[i].page.width == 0) ||
            (layer_info[i].page.height == 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer data is empty");
            if (layer_info[i].info != (StringInfo *) NULL)
              layer_info[i].info=DestroyStringInfo(layer_info[i].info);
            continue;
          }
        /*
          Allocate layered image.
        */
        layer_info[i].image=CloneImage(image,layer_info[i].page.width,
          layer_info[i].page.height,MagickFalse,exception);
        if (layer_info[i].image == (Image *) NULL)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " allocation of image for layer %.20g failed",(double) i);
            ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
              image->filename);
          }
        if (layer_info[i].info != (StringInfo *) NULL)
          {
            (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
              layer_info[i].info);
            layer_info[i].info=DestroyStringInfo(layer_info[i].info);
          }
      }
      if (image_info->ping == MagickFalse)
        {
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                /* Empty layer: skip its channel data without decoding. */
                for (j=0; j < layer_info[i].channels; j++)
                {
                  if (DiscardBlobBytes(image,(MagickSizeType)
                      layer_info[i].channel_info[j].size) == MagickFalse)
                    {
                      layer_info=DestroyLayerInfo(layer_info,number_layers);
                      ThrowBinaryException(CorruptImageError,
                        "UnexpectedEndOfFile",image->filename);
                    }
                }
                continue;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " reading data for layer %.20g",(double) i);
            status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
              exception);
            if (status == MagickFalse)
              break;
            status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
              number_layers);
            if (status == MagickFalse)
              break;
          }
        }
      if (status != MagickFalse)
        {
          /* Compact away empty layers, then link the rest into the list. */
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                for (j=i; j < number_layers - 1; j++)
                  layer_info[j] = layer_info[j+1];
                number_layers--;
                i--;
              }
          }
          if (number_layers > 0)
            {
              for (i=0; i < number_layers; i++)
              {
                if (i > 0)
                  layer_info[i].image->previous=layer_info[i-1].image;
                if (i < (number_layers-1))
                  layer_info[i].image->next=layer_info[i+1].image;
                layer_info[i].image->page=layer_info[i].page;
              }
              image->next=layer_info[0].image;
              layer_info[0].image->previous=image;
            }
          layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
        }
      else
        layer_info=DestroyLayerInfo(layer_info,number_layers);
    }
  return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  /*
    Public entry point for reading the layer section: enforce the coder
    security policy before delegating to ReadPSDLayersInternal().
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickFalse);
  return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
    exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
  /*
    Read the precombined (composite) image that follows the layer section.
    Only Raw and RLE compression are handled here.
  */
  MagickBooleanType
    status;
  MagickOffsetType
    *scanline_sizes;
  PSDCompressionType
    compression;
  register ssize_t
    channel;
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if ((compression != Raw) && (compression != RLE))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  scanline_sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /*
        RLE data is preceded by a table of compressed scanline sizes, one
        entry per row per channel.
      */
      scanline_sizes=ReadPSDRLESizes(image,psd_info,image->rows*
        psd_info->channels);
      if (scanline_sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (channel=0; channel < (ssize_t) psd_info->channels; channel++)
  {
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,channel,
        scanline_sizes+(channel*image->rows),exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,channel,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,channel,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /* CMYK samples are stored inverted; then repair the alpha blending. */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateImage(image,MagickFalse);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  scanline_sizes=(MagickOffsetType *) RelinquishMagickMemory(scanline_sizes);
  return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  /*
    Read a PSD/PSB image: header, colormap, image resource blocks, the
    layer-and-mask section, then the precombined composite.  Returns the
    head of the decoded image list, or NULL on failure.
  */
  Image
    *image;
  MagickBooleanType
    has_merged_image,
    skip_layers;
  MagickOffsetType
    offset;
  MagickSizeType
    length;
  MagickBooleanType
    status;
  PSDInfo
    psd_info;
  register ssize_t
    i;
  ssize_t
    count;
  unsigned char
    *data;
  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.  "8BPS" + version 1 is PSD, version 2 is PSB.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* The 30000-pixel dimension limit applies to version 1 (PSD) only. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  if (SetImageBackgroundColor(image) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  if (psd_info.mode == LabMode)
    SetImageColorspace(image,LabColorspace);
  if (psd_info.mode == CMYKMode)
    {
      SetImageColorspace(image,CMYKColorspace);
      /* Any channel beyond the colorspace's own carries transparency. */
      image->matte=psd_info.channels > 4 ? MagickTrue : MagickFalse;
    }
  else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
      (psd_info.mode == DuotoneMode))
    {
      status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536);
      if (status == MagickFalse)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " Image colormap allocated");
      SetImageColorspace(image,GRAYColorspace);
      image->matte=psd_info.channels > 1 ? MagickTrue : MagickFalse;
    }
  else
    image->matte=psd_info.channels > 3 ? MagickTrue : MagickFalse;
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if (psd_info.mode == DuotoneMode)
        {
          /*
            Duotone image data; the format of this data is undocumented.
          */
          data=(unsigned char *) AcquireQuantumMemory((size_t) length,
            sizeof(*data));
          if (data == (unsigned char *) NULL)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          (void) ReadBlob(image,(size_t) length,data);
          data=(unsigned char *) RelinquishMagickMemory(data);
        }
      else
        {
          size_t
            number_colors;
          /*
            Read PSD raster colormap.  Entries are stored planar: all reds,
            then all greens, then all blues.
          */
          number_colors=length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          image->matte=MagickFalse;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;
      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      /* May clear has_merged_image based on the resource contents. */
      ParseImageResourceBlocks(image,blocks,(size_t) length,&has_merged_image);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      /* NOTE(review): reads two longs and keeps only the second --
         presumably skipping an empty section header; confirm against the
         PSD specification. */
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      /* Only the composite was requested; don't decode the layers. */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
  if (image_info->ping != MagickFalse)
    {
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  if (has_merged_image != MagickFalse || GetImageListLength(image) == 1)
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  /* Fallback: no composite and no layers decoded yet -- re-read layers. */
  if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) &&
      (length != 0))
    {
      SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;
      if (GetImageListLength(image) == 1)
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      /* Synthesize the composite by flattening the decoded layers. */
      SetImageAlphaChannel(image,TransparentAlphaChannel);
      image->background_color.opacity=TransparentOpacity;
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  /*
    Register the PSB (large document) and PSD formats, sharing the same
    reader, writer, and magick detector.
  */
  static const struct
  {
    const char
      *tag,
      *description;
  } formats[2] =
  {
    { "PSB", "Adobe Large Document Format" },
    { "PSD", "Adobe Photoshop bitmap" }
  };
  register ssize_t
    i;
  for (i=0; i < 2; i++)
  {
    MagickInfo
      *entry;
    entry=SetMagickInfo(formats[i].tag);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    entry->seekable_stream=MagickTrue;
    entry->description=ConstantString(formats[i].description);
    entry->module=ConstantString("PSD");
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  /*
    Remove the PSB and PSD registrations installed by RegisterPSDImage().
  */
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Write a PSD offset field: 16 bits for version 1 (PSD), 32 bits for
    version 2 (PSB).
  */
  return(psd_info->version == 1 ?
    WriteBlobMSBShort(image,(unsigned short) offset) :
    WriteBlobMSBLong(image,(unsigned int) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  /*
    Back-patch a previously reserved offset field: seek to "offset", write
    "size" in the version-appropriate width, and restore the stream
    position.  Returns the byte count reported by the write.
  */
  MagickSizeType
    restore_offset;
  ssize_t
    count;
  restore_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Write a PSD size field: 32 bits for version 1 (PSD), 64 bits for
    version 2 (PSB).
  */
  return(psd_info->version == 1 ?
    WriteBlobMSBLong(image,(unsigned int) size) :
    WriteBlobMSBLongLong(image,size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  /*
    Back-patch a previously reserved size field: seek to "offset", write
    "size" in the version-appropriate width (32-bit for PSD, 64-bit for
    PSB), and restore the stream position.
  */
  MagickSizeType
    restore_offset;
  ssize_t
    count;
  restore_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBLong(image,(unsigned int) size) :
    WriteBlobMSBLongLong(image,size);
  SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding: runs of three or more
    identical bytes become a two-byte packed record, everything else is
    emitted as a literal record of up to 128 bytes, followed by a final
    EOD marker.  Returns the compressed byte count; 'compact_pixels'
    must be sized for the worst case (see AcquireCompactPixels()).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* scratch for one literal record: 1 header byte + up to 127 data bytes */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* single trailing byte: literal record of length 1 */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* two trailing bytes: literal record of length 2 */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* three identical trailing bytes: packed run of 3 */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* otherwise a literal record of length 3 */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* extend the run up to the remaining length or 127 bytes */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            /* header byte 257-count encodes 'count' repeats of one byte */
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run: collect bytes until three identical bytes are seen
          (the start of a packed run) or the record fills up.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          /* stop 3 bytes early so the trailing cases above handle the tail */
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  /*
    Write the 2-byte compression marker for the next channel data.  For
    RLE, additionally reserve one byte-count slot per row per channel;
    the slots are backpatched later via WritePSDOffset().  Returns the
    number of bytes written.
  */
  if (next_image->compression == RLECompression)
    {
      count=WriteBlobMSBShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    return(WriteBlobMSBShort(image,ZipWithoutPrediction));
#endif
  return(WriteBlobMSBShort(image,Raw));
}
/*
  Write one channel of 'next_image' into the output blob using the
  image's compression (raw, Packbits RLE, or Zip when zlib is built in).
  'size_offset' locates the reserved row-length slots for RLE; when
  'separate' is set the channel gets its own compression marker.
  Returns the number of bytes written (0 on failure).
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  int
    y;

  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const PixelPacket
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE

#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* each separate channel carries its own 2-byte compression marker */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  /* PSD stores samples at 8 or 16 bits only */
  if (next_image->depth > 8)
    next_image->depth=16;
  /* NOTE(review): monochrome is derived from 'image' (the base image),
     not 'next_image' whose pixels are exported below — confirm intended */
  monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1)
    ? MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
        sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      ResetMagickMemory(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* quality 1..9 maps directly onto the zlib compression level */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,&image->exception);
    if (monochrome != MagickFalse)
      /* bitmap-mode PSD stores inverted bits (0 = white) */
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels);
        count+=WriteBlob(image,length,compact_pixels);
        /* backpatch this row's byte count into its reserved slot */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (next_image->compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
            stream.avail_out=(uInt) CHUNK;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) CHUNK-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(Image *image)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  /*
    Allocate a worst-case Packbits output buffer for one image row; two
    bytes per sample when the depth exceeds 8 bits.  Logs an exception
    and returns NULL on allocation failure.
  */
  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
/*
  Write every channel of 'next_image' (index or gray, RGB/CMYK, alpha,
  and an optional opacity mask) via WritePSDChannel().  When 'separate'
  is set each channel's byte count is backpatched at 'size_offset';
  otherwise a single compression header covers all channels and
  'rows_offset' walks the shared RLE row-length table.  Returns the
  total bytes written (0 on failure).
*/
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* one compression marker covers all channels; count them first */
      if (next_image->storage_class != PseudoClass)
        {
          if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
            channels=next_image->colorspace == CMYKColorspace ? 4 : 3;
          if (next_image->matte != MagickFalse)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,channels);
      /* size of one channel's RLE row-length table (2 or 4 bytes per row) */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if (next_image->storage_class == PseudoClass)
    {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsGrayImage(next_image,&next_image->exception) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before and after writing */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateImage(next_image,MagickFalse);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->matte != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateImage(next_image,MagickFalse);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* optional layer opacity mask stored in the image registry */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            &image->exception);
          if (mask != (Image *) NULL)
            {
              if (mask->compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              /* the mask is a single gray channel; write it as "red" */
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  register ssize_t
    i;

  /*
    Write a Pascal string (one length byte followed by up to 255
    characters), zero-padded so the total field is a multiple of
    'padding' bytes.  Returns the number of bytes written.
  */
  count=0;
  /* hoist strlen(): the original evaluated it twice in the clamp */
  length=strlen(value);
  if (length > 255UL)
    length=255UL;  /* the length must fit in a single byte */
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  /*
    Write the 0x03ED (ResolutionInfo) 8BIM resource block: resolutions
    are stored as 16.16 fixed-point values followed by the display unit
    (1 = pixels/inch, 2 = pixels/cm).
  */
  if (image->units == PixelsPerCentimeterResolution)
    {
      /* round to nearest fixed-point value while converting to cm */
      x_resolution=2.54*65536.0*image->x_resolution+0.5;
      y_resolution=2.54*65536.0*image->y_resolution+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->x_resolution+0.5;
      y_resolution=65536.0*image->y_resolution+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* 0.5 was already added above; adding it again would double-round */
  (void) WriteBlobMSBLong(image,(unsigned int) x_resolution);
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) y_resolution);
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    written;

  /*
    Emit a layer channel record: the channel id followed by a zeroed
    size placeholder (backpatched later by WritePSDSize()).  Returns
    the number of bytes written.
  */
  written=WriteBlobMSBSignedShort(image,channel);
  written+=SetPSDSize(psd_info,image,0);
  return(written);
}
/*
  Strip the embedded ICC profile resource (id 0x040F) from an 8BIM
  resource block so it is not written twice (the profile is emitted
  separately).  The block is edited in place and its length shrunk.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* walk the 8BIM records; each is signature/id/name/size + data */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* full record size: 12-byte header + padded payload */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* close the gap by shifting the remaining records down */
            if ((q+quantum < (datum+length-16)))
              (void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* record payloads are padded to even length */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Strip the resolution resource (id 0x03ED) from an 8BIM resource block
  since WriteResolutionResourceBlock() emits a fresh one.  The block is
  edited in place and its length shrunk.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* walk the 8BIM records; each is signature/id/name/size + data */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    /* payloads are padded to even length */
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)))
      {
        /* shift the remaining records over this one and shrink */
        (void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Return the "psd:additional-info" profile filtered according to the
  psd:additional-info option: "all" keeps everything, anything other
  than "selective" drops the profile entirely, and "selective" keeps
  only records whose 4-character key is in the whitelist below.  The
  filtering is done in place on the profile data.  Returns NULL when
  nothing should be written.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* not requested: discard the profile entirely */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* each record: 4-byte signature, 4-char key, 4-byte size, payload */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(*p++);
    key[1]=(*p++);
    key[2]=(*p++);
    key[3]=(*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;

      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* drop this record: shift the rest down over it; the returned
           destination (p-12) repositions p at the moved data */
        if (remaining_length > 0)
          p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  /* NOTE(review): 'info' aliases the removed 'profile'; SetImageProfile
     clones it back into the image, and the returned pointer's ownership
     falls to the caller — confirm the caller releases it */
  SetImageProfile(image,"psd:additional-info",info);
  return(profile);
}
/*
  Write 'image' (and any subsequent images in the list as layers) in
  Adobe Photoshop format: file header, color-mode data, image resource
  blocks, layer and mask records, per-layer channel data, and finally
  the merged composite image.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image)
{
  char
    layer_name[MaxTextExtent];

  const char
    *property;

  const StringInfo
    *icc_profile,
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    num_channels,
    packet_size,
    rounded_size,
    size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->matte != MagickFalse)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* classic PSD is limited to 30000x30000; larger images require PSB */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,&image->exception) != MagickFalse))
    num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorMatteType) && (image->storage_class == PseudoClass))
      num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->matte != MagickFalse ? 4UL : 3UL);
        else
          num_channels=(image->matte != MagickFalse ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsGrayImage(image,&image->exception) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsMonochromeImage(image,&image->exception) &&
        (image->depth == 1) ? MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsGrayImage(image,&image->exception) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);
  else
    {
      /*
        Write PSD raster colormap.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(
          image->colormap[i].green));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* work on a clone; ICC and resolution records are re-emitted below */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      /* pad to even length as required for 8BIM resources */
      if ((MagickOffsetType) GetStringInfoLength(icc_profile) !=
          PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  /* layers come from the second image onward; a single image is its
     own (only) layer */
  base_image=GetNextImageInList(image);
  if (base_image == (Image *)NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* reserve two size slots: layer+mask section and layer-info section */
  SetPSDSize(&psd_info,image,0);
  SetPSDSize(&psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* a negative layer count flags that the first alpha channel holds
     the merged image's transparency */
  if (image->matte != MagickFalse)
    size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  /*
    Write one layer record per image.
  */
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,
          &image->exception);
        default_color=strlen(property) == 9 ? 255 : 0;
      }
    /* layer bounding box: top, left, bottom, right */
    size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
    size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
    size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
      next_image->columns));
    channels=1U;
    if ((next_image->storage_class != PseudoClass) &&
        (IsGrayImage(next_image,&next_image->exception) == MagickFalse))
      channels=next_image->colorspace == CMYKColorspace ? 4U : 3U;
    total_channels=channels;
    if (next_image->matte != MagickFalse)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobMSBShort(image,total_channels);
    /* remember where this layer's channel size slots start */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(&psd_info,image,(signed short) i);
    if (next_image->matte != MagickFalse)
      size+=WriteChannelSize(&psd_info,image,-1); /* alpha channel id */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(&psd_info,image,-2); /* layer mask id */
    size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
    size+=WriteBlob(image,4,(const unsigned char *)
      CompositeOperatorToPSDBlendMode(next_image->compose));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,
          &image->exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);
    size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ?
      1 << 0x02 : 1); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);
    info=GetAdditionalInformation(image_info,next_image);
    property=(const char *) GetImageProperty(next_image,"label");
    if (property == (const char *) NULL)
      {
        /* unlabeled layers are named L1, L2, ... */
        (void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* extra-data field length: padded name + mask record + blending
       ranges + additional info */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobMSBLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobMSBLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,&image->exception);
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobMSBLong(image,20); /* mask record size */
        size+=WriteBlobMSBSignedLong(image,mask->page.y);
        size+=WriteBlobMSBSignedLong(image,mask->page.x);
        size+=WriteBlobMSBSignedLong(image,(const signed int) mask->rows+
          mask->page.y);
        size+=WriteBlobMSBSignedLong(image,(const signed int) mask->columns+
          mask->page.x);
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0);
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobMSBLong(image,0); /* blending ranges (empty) */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(&psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  (void) WriteBlobMSBLong(image,0); /* user mask data */
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  /* backpatch the two reserved size slots; layer-info size is padded
     to an even byte count */
  size_offset+=WritePSDSize(&psd_info,image,size+
    (psd_info.version == 1 ? 8 : 12),size_offset);
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      /* the merged composite may not be Zip-compressed; fall back to RLE */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,
          MagickFalse) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
rsvp_fmt_plug.c | /*
* Cracker for HMAC-MD5 and HMAC-SHA1 based authentication in RSVP.
*
* This software is Copyright (c) 2014 Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
*
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rsvp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rsvp);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 4096
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#endif // __MIC__
#endif
#include "arch.h"
#include "md5.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "rsvp"
#define FORMAT_NAME "HMAC-MD5 / HMAC-SHA1, RSVP, IS-IS"
#define FORMAT_TAG "$rsvp$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define HEXCHARS "0123456789abcdef"
#define MAX_SALT_SIZE 8192
// currently only 2 types, 1 for MD5 and 2 for SHA-1. Bump this
// number each time a type is added, and make sure the types
// are sequential.
#define MAX_TYPES 2
static struct fmt_tests tests[] = {
{"$rsvp$1$10010000ff0000ac002404010100000000000001d7e95bfa0000003a00000000000000000000000000000000000c0101c0a8011406000017000c0301c0a8010a020004020008050100007530000c0b01c0a8010a0000000000240c0200000007010000067f00000545fa000046fa000045fa0000000000007fffffff00300d020000000a010000080400000100000001060000014998968008000001000000000a000001000005dc05000000$636d8e6db5351fbc9dad620c5ec16c0b", "password12345"},
{"$rsvp$2$10010000ff0000b0002804010100000000000001d7e95bfa0000055d0000000000000000000000000000000000000000000c0101c0a8011406000017000c0301c0a8010a020004020008050100007530000c0b01c0a8010a0000000000240c0200000007010000067f00000545fa000046fa000045fa0000000000007fffffff00300d020000000a010000080400000100000001060000014998968008000001000000000a000001000005dc05000000$ab63f157e601742983b853f13a63bc4d4379a434", "JtR_kicks_ass"},
// IS-IS HMAC-MD5 hash
{"$rsvp$1$831b01000f01000001192168001005001e05d940192168001005010a1136000000000000000000000000000000008101cc0104034900018404c0a87805d30300000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000890000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000$ae116a4cff88a4b13b3ae14bf169ff5c", "password12345"},
// IS-IS HMAC-MD5 hash
{"$rsvp$1$831b01000f01000001192168001005001e05d940192168001005010a1136000000000000000000000000000000008101cc0104034900018404c0a87805d30300000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000890000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000$5048a1fe4ed87c32bc6c4af43095cae4", "1234567890"},
// IS-IS HMAC-MD5, isis-hmac-md5_key-1234.pcap
{"$rsvp$1$831401001101000301192168201101001b005a000104034900018102cc8ee50400000002e810fe800000000000000465fffffe000000f00f0000000004192168201104000000040a113600000000000000000000000000000000$44b62860b363f9adf60acdb9d66abe27", "1234"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];  // candidate passwords, one slot per key index
static int *saved_len;                           // cached strlen() of each saved_key entry
// when we add more types, they need to be sequential (next will be 3),
// AND we need to bump this to the count. Each type will use one of these
// to track whether it has built the first half of the hmac. The size
// of this array should be 1 more than the max number of types.
static int new_keys[MAX_TYPES+1];
// we make our crypt_out large enough for an SHA1 output now. Even though
// we only compare first BINARY_SIZE data.
static uint32_t (*crypt_out)[ (BINARY_SIZE+4) / sizeof(uint32_t)];
// cached HMAC inner/outer pad states, one per key index (SHA1 for type 2)
static SHA_CTX *ipad_ctx;
static SHA_CTX *opad_ctx;
// cached HMAC inner/outer pad states, one per key index (MD5 for type 1)
static MD5_CTX *ipad_mctx;
static MD5_CTX *opad_mctx;
// currently selected salt: raw packet bytes that get HMAC'd with the key
static struct custom_salt {
	int type;                           // 1 = HMAC-MD5, 2 = HMAC-SHA1
	int salt_length;                    // bytes used in salt[]
	unsigned char salt[MAX_SALT_SIZE];  // decoded packet data
} *cur_salt;
/*
 * Allocate all per-candidate buffers, scaled up for OpenMP if enabled.
 * Called once by the JtR core before any keys are set.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	/* FIX: each allocation now sizes from its own pointer; the original
	   used sizeof(*opad_ctx)/sizeof(*opad_mctx) for the ipad buffers
	   (same size, so no behavior change — but misleading). */
	ipad_ctx = mem_calloc(self->params.max_keys_per_crypt,
	                      sizeof(*ipad_ctx));
	opad_ctx = mem_calloc(self->params.max_keys_per_crypt,
	                      sizeof(*opad_ctx));
	ipad_mctx = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*ipad_mctx));
	opad_mctx = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*opad_mctx));
}
/* Release every buffer allocated by init(), in reverse order of allocation. */
static void done(void)
{
	MEM_FREE(opad_mctx);
	MEM_FREE(ipad_mctx);
	MEM_FREE(opad_ctx);
	MEM_FREE(ipad_ctx);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}
/*
 * Validate a candidate hash line: "$rsvp$<version>$<salt-hex>$<hash-hex>".
 * Returns 1 when the line is well-formed, 0 otherwise.
 * Works on a strdup'd copy because strtokm mutates its input.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *strkeep;
	int version;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;
	strkeep = strdup(ciphertext);
	/* FIX: strdup can fail; the original dereferenced the result unchecked */
	if (!strkeep)
		return 0;
	p = &strkeep[TAG_LENGTH];
	if ((p = strtokm(p, "$")) == NULL) /* version */
		goto err;
	version = atoi(p);
	if (version != 1 && version != 2)  /* 1 = HMAC-MD5, 2 = HMAC-SHA1 */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt */
		goto err;
	if (strlen(p) >= MAX_SALT_SIZE*2)
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* hash */
		goto err;
	/* there is code that trim longer binary values, so we do not need to check for extra long */
	if (strlen(p) < BINARY_SIZE*2)
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(strkeep);
	return 1;
err:;
	MEM_FREE(strkeep);
	return 0;
}
/*
 * Parse the salt (type byte + raw packet hex) out of a ciphertext into a
 * static custom_salt and return a pointer to it. The core copies the result,
 * so returning static storage is safe.
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i;
	char *p, *q;
	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	p = ciphertext;
	cs.type = atoi(p);
	/* skip the single-digit type and the '$' after it (types are 1..2) */
	p = p + 2;
	/* NOTE(review): relies on valid() having guaranteed a trailing '$'
	   section exists; strchr() returning NULL here would be UB. */
	q = strchr(p, '$') + 1;
	/* hex chars between p and the '$' decode to (q-p)/2 salt bytes */
	cs.salt_length = (q - p) / 2;
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
			atoi16[ARCH_INDEX(p[2 * i + 1])];
	return (void*)&cs;
}
/*
 * Decode the hex digest that follows the last '$' of the ciphertext into
 * BINARY_SIZE raw bytes. Static, ARCH_WORD-aligned storage is returned;
 * the core copies it before the next call.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	char *hex = strrchr(ciphertext, '$') + 1;
	int i;

	for (i = 0; i < BINARY_SIZE; i++, hex += 2)
		buf.c[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
		            atoi16[ARCH_INDEX(hex[1])];
	return buf.c;
}
/* Select the salt that the next crypt_all() batch will HMAC against. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute HMAC(key, salt) for every queued candidate key.
 * type 1 uses HMAC-MD5, type 2 uses HMAC-SHA1 (RFC 2104 construction).
 * The inner/outer pad contexts depend only on the key, so they are cached
 * per index and rebuilt only when new_keys[] flags the type as stale.
 *
 * NOTE(review): the for-loop header is inside #ifdef _OPENMP, so a
 * non-OpenMP build executes the block exactly once with index == 0 —
 * presumably MAX_KEYS_PER_CRYPT is 1 in that configuration; confirm
 * against the format's macro definitions.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		unsigned char buf[20];
		if (cur_salt->type == 1) {
			/* HMAC-MD5 */
			MD5_CTX ctx;
			if (new_keys[cur_salt->type]) {
				int i, len = strlen(saved_key[index]);
				unsigned char *p = (unsigned char*)saved_key[index];
				unsigned char pad[64];
				/* RFC 2104: a key longer than one block is digested first */
				if (len > 64) {
					MD5_Init(&ctx);
					MD5_Update(&ctx, p, len);
					MD5_Final(buf, &ctx);
					len = 16;
					p = buf;
				}
				/* inner pad: key XOR 0x36, zero-padded to 64 bytes */
				for (i = 0; i < len; ++i) {
					pad[i] = p[i] ^ 0x36;
				}
				MD5_Init(&ipad_mctx[index]);
				MD5_Update(&ipad_mctx[index], pad, len);
				if (len < 64)
					MD5_Update(&ipad_mctx[index], "\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36", 64-len);
				/* outer pad: key XOR 0x5C, zero-padded to 64 bytes */
				for (i = 0; i < len; ++i) {
					pad[i] = p[i] ^ 0x5C;
				}
				MD5_Init(&opad_mctx[index]);
				MD5_Update(&opad_mctx[index], pad, len);
				if (len < 64)
					MD5_Update(&opad_mctx[index], "\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C", 64-len);
			}
			/* finish: MD5(opad || MD5(ipad || salt)) from the cached states */
			memcpy(&ctx, &ipad_mctx[index], sizeof(ctx));
			MD5_Update(&ctx, cur_salt->salt, cur_salt->salt_length);
			MD5_Final(buf, &ctx);
			memcpy(&ctx, &opad_mctx[index], sizeof(ctx));
			MD5_Update(&ctx, buf, 16);
			MD5_Final((unsigned char*)(crypt_out[index]), &ctx);
		} else if (cur_salt->type == 2) {
			/* HMAC-SHA1, same structure as the MD5 branch above */
			SHA_CTX ctx;
			if (new_keys[cur_salt->type]) {
				int i, len = strlen(saved_key[index]);
				unsigned char *p = (unsigned char*)saved_key[index];
				unsigned char pad[64];
				if (len > 64) {
					SHA1_Init(&ctx);
					SHA1_Update(&ctx, p, len);
					SHA1_Final(buf, &ctx);
					len = 20;
					p = buf;
				}
				for (i = 0; i < len; ++i) {
					pad[i] = p[i] ^ 0x36;
				}
				SHA1_Init(&ipad_ctx[index]);
				SHA1_Update(&ipad_ctx[index], pad, len);
				if (len < 64)
					SHA1_Update(&ipad_ctx[index], "\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36", 64-len);
				for (i = 0; i < len; ++i) {
					pad[i] = p[i] ^ 0x5C;
				}
				SHA1_Init(&opad_ctx[index]);
				SHA1_Update(&opad_ctx[index], pad, len);
				if (len < 64)
					SHA1_Update(&opad_ctx[index], "\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C", 64-len);
			}
			memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
			SHA1_Update(&ctx, cur_salt->salt, cur_salt->salt_length);
			SHA1_Final(buf, &ctx);
			memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
			SHA1_Update(&ctx, buf, 20);
			// NOTE, this writes 20 bytes. That is why we had to bump up the size of each crypt_out[] value,
			// even though we only look at the first 16 bytes when comparing the saved binary.
			SHA1_Final((unsigned char*)(crypt_out[index]), &ctx);
		}
	}
	/* pads for this type are now cached; skip rebuilding until keys change */
	new_keys[cur_salt->type] = 0;
	return count;
}
/*
 * Quick scan: does any computed hash match the target's first 32 bits?
 * NOTE(review): as in crypt_all, the loop header is inside #ifdef _OPENMP,
 * so a non-OpenMP build checks only index 0 — presumably count is 1 there;
 * confirm against MAX_KEYS_PER_CRYPT.
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
	if (((uint32_t*)binary)[0] == crypt_out[index][0])
		return 1;
	return 0;
}
/* Full BINARY_SIZE comparison for one candidate flagged by cmp_all(). */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* cmp_one() already compared every byte we store, so nothing is left to
 * verify here; always report a confirmed match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store one candidate password and mark the cached HMAC pads stale. */
static void rsvp_set_key(char *key, int index)
{
	saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key));
	// Workaround for self-test code not working as IRL
	// (any non-zero value forces crypt_all to rebuild the pads; the 2
	// presumably distinguishes this path from clear_keys' 1 — TODO confirm)
	new_keys[1] = new_keys[2] = 2;
}
/* Flag every hash type's cached HMAC pads as needing a rebuild. */
static void clear_keys(void) {
	int t;

	for (t = MAX_TYPES; t >= 0; --t)
		new_keys[t] = 1;
}
/* Return the candidate password stored at this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
* report hash algorithm used for hmac as "tunable cost"
*/
static unsigned int rsvp_hash_type(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->type;
}
/* Format descriptor registered with the John the Ripper core. */
struct fmt_main fmt_rsvp = {
	{ /* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ /* tunable cost names */
			"hash algorithm used for hmac [1:MD5 2:SHA1]"
		},
		{ FORMAT_TAG },
		tests
	}, { /* method table */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ /* tunable cost value extractors */
			rsvp_hash_type,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		rsvp_set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif
|
omp_parallel_for_ordered.c | // RUN: %libomp-compile-and-run
// REQUIRES: !(abt && (clang || gcc))
#include <stdio.h>
#include "omp_testsuite.h"
static int last_i = 0;  /* highest loop index seen so far; written only inside the ordered region */
int i;
#pragma omp threadprivate(i)
/* Variable ii is used to avoid problems with a threadprivate variable used as a loop
 * index. See test omp_threadprivate_for.
 */
static int ii;
#pragma omp threadprivate(ii)
/*!
Utility function: returns true if the passed argument is larger than
the argument of the last call of this function.
*/
/* Return non-zero iff i exceeds the argument of the previous call,
 * then remember i for the next comparison. */
static int check_i_islarger2(int i)
{
	int result = i > last_i;

	last_i = i;
	return result;
}
/*
 * Check that #pragma omp ordered executes iterations in ascending loop
 * order: the ordered region both accumulates the sum and verifies via
 * check_i_islarger2() that each index is larger than the previous one.
 * Returns non-zero on success.
 */
int test_omp_parallel_for_ordered()
{
	int sum;
	int is_larger;
	int known_sum;
	int i;
	sum = 0;
	is_larger = 1;
	last_i = 0;
	/* schedule(static,1) spreads consecutive iterations across threads,
	   so only the ordered clause can make the updates sequential */
	#pragma omp parallel for schedule(static,1) private(i) ordered
	for (i = 1; i < 100; i++) {
		ii = i;
		#pragma omp ordered
		{
			/* both shared updates happen inside the ordered region,
			   which serializes them in iteration order — no reduction
			   clause is needed */
			is_larger = check_i_islarger2 (ii) && is_larger;
			sum = sum + ii;
		}
	}
	known_sum = (99 * 100) / 2;
	fprintf (stderr," known_sum = %d , sum = %d \n", known_sum, sum);
	fprintf (stderr," is_larger = %d\n", is_larger);
	return (known_sum == sum) && is_larger;
}
/* Run the ordered-clause test REPETITIONS times; exit status is the
 * number of failed repetitions (0 == all passed). */
int main()
{
	int rep;
	int num_failed = 0;

	for (rep = 0; rep < REPETITIONS; rep++) {
		if (!test_omp_parallel_for_ordered())
			num_failed++;
	}
	return num_failed;
}
|
RACIPELIB.c | /***********************************************************************
Random Circuit Perturbation (RACIPE) method
Copyright 2016 BIN HUANG <bh14@rice.edu>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Paper to cite:
Huang, Bin, Mingyang Lu, Dongya Jia, Eshel Ben-Jacob, Herbert Levine, and Jose N. Onuchic. "Interrogating the topological robustness of gene regulatory circuits by randomization." PLoS computational biology 13, no. 3 (2017): e1005456.
This code has used PCG Random Number Generation script by Melissa O'Neill
************************************************************************/
# include <stdlib.h>
# include <stdint.h>
# include <stdio.h>
# include <math.h>
# include <time.h>
# include <string.h>
# include <inttypes.h>
# include <omp.h>
# include "RACIPELIB.h"
# include "pcg_basic.h"
# include "rkf45.h"
/*********Global Variables to monitor Progress and No.of Threads ********/
int countstartmodels=0;  // models whose simulation has begun (progress reporting)
int countfinmodels=0;    // models whose simulation has completed
int threads=3;           // OpenMP thread count; overridden by the -threads option
/*********Shared Functions*********/
// Generate random value in (minV, maxV) following uniform distribution
// Generate random value in (minV, maxV) following uniform distribution
double randu(double minV, double maxV)
{
	double u;
	// draw U in (0,1]; zero is rejected so log(u) in callers is finite
	// NOTE(review): u can equal 1.0 when pcg32_random() returns UINT32_MAX,
	// so maxV itself is attainable despite the open-interval comment — confirm
	// whether callers rely on maxV being excluded.
	do {
		u = ((double)pcg32_random()/UINT32_MAX);
	} while (u==0);
	return (minV + (maxV - minV)*u);
}
// Generate random value following Gaussian distribution N(m, stdvalue)
// Generate random value following Gaussian distribution N(m, stdvalue)
// using the Marsaglia polar method; each round produces two deviates,
// the second (z2) is cached in static state and returned on the next call.
// NOTE(review): the static z2/call cache is shared mutable state — not
// thread-safe if called from OpenMP regions; confirm callers are serial.
double randg(double m, double stdvalue)
{
	double u1 = 0.0, u2 = 0.0, rsq = 0.0, fac = 0.0;
	static double z1 = 0.0, z2 = 0.0;
	static int call = 0;
	// odd-numbered call: return the cached second deviate
	if (call == 1){
		call = !call;
		return z2*stdvalue + m;
	}
	// sample (u1,u2) uniformly inside the unit disc, excluding the origin
	do {
		u1 = 2.0*randu(0, 1) - 1;
		u2 = 2.0*randu(0, 1) - 1;
		rsq = u1*u1 + u2*u2;
	} while (rsq >= 1.0 || rsq == 0.0);
	fac = sqrt(-2.0*log(rsq)/rsq);
	z1 = u1*fac;
	z2 = u2*fac;
	call = !call;
	return z1*stdvalue + m;
}
// Generate random value following non-negative Gaussian distribution N(m, stdvalue)
// Strictly positive Gaussian deviate: rejection-sample N(m, stdvalue)
// until a value > 0 appears.
double randpg(double m, double stdvalue)
{
	double v;

	for (;;) {
		v = randg(m, stdvalue);
		if (v > 0)
			return v;
	}
}
// Generate random value following exponential distribution with mean equals to m
// Exponential deviate with mean m, via the inversion method: -m * ln(U)
// where U ~ Uniform(0,1) (randu never returns 0, so the log is finite).
double randexp(double m)
{
	return -m * log(randu(0, 1));
}
// Generate random value for the fold change parameter, which needs to be greater than or equal to 1
// Generate random value for fold change parameter, which must be >= 1;
// rejection-samples the chosen distribution until the draw is at least 1.
// NOTE(review): dist == 1 (uniform) has no case here and returns 0 — the
// uniform fold-change path is presumably handled by the caller directly;
// TODO confirm, since a 0 fold change would be invalid.
double randfd(double m, double stdvalue, int dist)
{
	double u = 0.0;
	switch (dist) {
	case 2: // Gaussian distribution
		do {
			u = randg(m, stdvalue);
		} while (u < 1);
		break;
	case 3: // Exponential distribution
		do {
			u = randexp(m);
		} while (u < 1);
		break;
	}
	return u;
}
// Generate discrete integer between minN and maxN
// Generate discrete integer between minN and maxN (inclusive) by drawing a
// continuous value from the selected distribution and snapping it to the
// nearest integer in range; out-of-range draws are rejected and retried.
double randd(double minN, double maxN, int dist)
{
	double u = 0.0;
	double z = 0.0;
	int cnt = 0;   // set to 1 once the draw lands in some integer's bin
	int i = 0;
	do {
		// continuous draw from the requested distribution
		if (dist == 1) {
			u = randu(minN-1, maxN+1);
		}
		else if (dist == 2){
			u = randpg((minN+maxN)/2.0, (maxN-minN)/2.0);
		}
		else if (dist == 3){
			u = randexp((minN+maxN)/2.0);
		}
		// snap u to integer k = minN+i when u falls in [k-0.5, k+0.5)
		for (i = 0; i < (maxN-minN+1); i++){
			if (u >= minN-0.5+i && u < minN+0.5+i){
				z = minN + i;
				cnt = 1;
				break;
			}
		}
		// retry when u missed every bin (or hit the excluded upper edge)
	} while (u == maxN+0.5 || cnt == 0);
	return z;
}
// Shifted Hill Function
double Hillshift (double x, double x0, double nx, double lamda)
{
double out;
out = lamda + (1.0 - lamda) * (1.0/(1.0 + pow((x/x0), nx)));
return out;
}
// qsort comparator for doubles, ascending.
static int racipe_cmp_double(const void *a, const void *b)
{
	double da = *(const double *)a, db = *(const double *)b;
	return (da > db) - (da < db);
}

// Find median of an array.
// NOTE: sorts x in place (ascending), exactly as the original did; callers
// relying on the array being sorted afterwards are unaffected.
// FIX: replaced the hand-rolled O(n^2) exchange sort with stdlib qsort.
double median(double *x, int n)
{
	qsort(x, (size_t)n, sizeof *x, racipe_cmp_double);
	if (n%2==0) {
		return ((x[n/2] + x[n/2 - 1]) / 2.0);
	}
	else {
		return x[n/2];
	}
}
// Distance between two solutions
double sumdelta (double *y, double *ytmp, int NEQN)
{
int i = 0;
double out = 0.0;
for (i = 0; i < NEQN; i++){
out = out + (y[i] - ytmp[i])*(y[i] - ytmp[i]);
}
return sqrt(out);
}
/*********Preprocess the topology file (.topo) or configure file (.cfg) for RACIPE method*********/
/*
 * Entry-point argument handling: validates the input file name, prints the
 * help text for -h, and dispatches to the .topo or .cfg pipeline.
 * FIX: the original called strtok(NULL, ".") and passed the result to
 * strdup/strcmp without a NULL check, so a filename with no extension
 * (e.g. "RACIPE circuit") crashed; both strtok results and the strdup of
 * the filename are now checked.
 * NOTE(review): strtok splits at the FIRST dot, so "a.b.topo" yields
 * extension "b" and is rejected — preserved to avoid changing behavior.
 */
void check_inputfile (int argc, char **argv, struct topo *topoinfo, struct opts *simu_opts, struct rlt *tmprlt)
{
	char *token;
	char *fileextension;
	char *inputfile;
	// Check the input arguments
	if (argc == 1) {
		printf("### Missing: Missing input file!\n");
		exit(3);
	}
	else if (argc == 2 && strcmp(argv[1], "-h") == 0){
		printf("Available options:\n");
		printf("-h : Show all available options.\n");
		printf("-maxtime : Maximum time for the simulation (Default 23.5 h).\n");
		printf("-threads : Number of threads to be used for the simulation (Default 3).\n");
		printf("-solver : The integrator method (1 --> Euler or 2 --> RK45) to solve the ODEs equations (Default 1).\n");
		printf("-flag : run RACIPE to produce the .cfg file only or the whole simulation (Default 0, perform the whole simulation).\n");
		printf("-KDID : Gene or link (See their ID in the .cfg file) to be knocked down.\n");
		printf("-OEID : Gene (See their ID in the .cfg file) to be overexpressed. (follow by -OEFD).\n");
		printf("-OEFD : Fold change to overexpress a gene (-OEID must be first set in the option, the value need to be bigger than 1). (Default 1) if the corresponding OEFD is not set, it will be set to 1.\n");
		printf("-DEID : Gene (See their ID in the .cfg file) to be downexpressed. (follow by -DEFD).\n");
		printf("-DEFD : Fold change to downexpress a gene (-DEID must be first set in the option, the value need to be bigger than 1). (Default 1) if the corresponding DEFD is not set, it will be set to 1.\n");
		printf("-dist : Distribution used for randomization\n");
		printf("            1 ---> Uniform Distribution (Default)\n");
		printf("            2 ---> Guassian Distribution\n");
		printf("            3 ---> Exponential Distribution\n");
		printf("-SF : Scale the distribution ranges of all the parameters except for the hill coefficients, should be smaller than 1 (Default 1).\n");
		printf("-num_findT : The number of simulations used to estimate the thresholds (Default 10000).\n");
		printf("-num_paras : The number of RACIPE models to generate (Default 100).\n");
		printf("-num_ode : The number of Random initial values to solve ODEs (Default 100).\n");
		printf("-num_stability : The maximum number of stable states to save for one RACIPE model (Default 10).\n");
		printf("-thrd : Cutoff for convergence of steady states for numerically solving ODEs (Default 1.0).\n");
		printf("-Toggle_f_p : Save parameters of each RACIPE model or not (Default 1 (yes)).\n");
		printf("-stepsize : Stepsize for solving ODEs (Default 0.1).\n");
		printf("-maxiters : The maximum number of iterations for solving ODEs at each random initial condition times 1000 (Default 20).\n");
		printf("-Toggle_T_test : Test the threshold assumption or not (Default 1 (yes)).\n");
		printf("-SBML_model : Output a model in the SBML format. The parameter will be the ID of the model (start from 1) to save (Default 0 (no SBML output)).\n");
		printf("-seed : random seed (Default 1).\n");
		printf("-minP : Minimum production rate (Default 1.0)\n");
		printf("-maxP : Maximum production rate (Default 100.0)\n");
		printf("-minK : Minimum degradation rate (Default 0.1)\n");
		printf("-maxK : Maximum degradation rate (Default 1.0)\n");
		printf("-minN : Minimum Hill coefficient (Default 1.0)\n");
		printf("-maxN : Maximum Hill coefficient (Default 6.0)\n");
		printf("-minF : Minimum fold change (Default 1.0)\n");
		printf("-maxF : Maximum fold change (Default 100.0)\n");
		exit(6);
	}
	inputfile = strdup(argv[1]);
	if (inputfile == NULL) {
		printf("### Wrong: Out of memory!\n");
		exit(1);
	}
	token = strtok(inputfile, ".");
	if (token == NULL) {
		printf("### Wrong: Input file is not recognized. It should be .topo or .cfg file!\n");
		exit(4);
	}
	topoinfo->modelname = strdup(token);
	token = strtok(NULL, ".");
	if (token == NULL) {
		/* filename has no extension at all */
		printf("### Wrong: Input file is not recognized. It should be .topo or .cfg file!\n");
		exit(4);
	}
	fileextension = strdup(token); // Whether .topo or .cfg file is the input file
	if (strcmp(fileextension, "topo") == 0) {
		simu_opts->exts = 0; // flag for input file as topo file
		initial_simuopts (argc, argv, topoinfo, simu_opts);
		Model_generate (argv[1], topoinfo, simu_opts, tmprlt);
	}
	else if ((strcmp(fileextension, "cfg") == 0)) {
		simu_opts->exts = 1; // flag for input file as cfg file
		read_cfg (topoinfo, simu_opts, tmprlt);
		initial_simuopts (argc, argv, topoinfo, simu_opts);
		Model_generate (argv[1], topoinfo, simu_opts, tmprlt);
	}
	else {
		printf("### Wrong: Input file is not recognized. It should be .topo or .cfg file!\n");
		exit(4);
	}
}
// Initialize the setting options for the simulations.
// Initialize simulation options: apply defaults for .topo input, then
// override from the "-name value" pairs on the command line. For .cfg
// input, values read by read_cfg() are kept unless overridden here.
void initial_simuopts (int argc, char **argv, struct topo *topoinfo, struct opts *simu_opts)
{
	int i = 0;
	int j = 0;
	int KDmarker = 0;   // set once -KDID appears (cfg input): resets the KD list before appending
	int OEmarker = 0;   // likewise for -OEID
	int DEmarker = 0;   // likewise for -DEID
	char *end;
	FILE *testexistence;
	char KDIDname[100] = "";
	char parasname[100] = "";
	// cfg input: remove any stale .prs parameter file from a previous run
	if (simu_opts->exts == 1) {
		strcpy(parasname, topoinfo->modelname);
		if (simu_opts->numKD != 0){
			for (i = 0; i < simu_opts->numKD; i++){
				sprintf(KDIDname, "%d", simu_opts->KDID[i]);
				strcat(parasname, "_");
				strcat(parasname, KDIDname);
			}
		}
		strcat(parasname, ".prs");
		testexistence = fopen(parasname, "r");
		if (testexistence != NULL) {
			fclose(testexistence);
			remove(parasname);
		}
	}
	// Default setting
	if (simu_opts->exts == 0){
		simu_opts->maxtime = 23.5;
		simu_opts->solver = 1;
		simu_opts->flag = 0;
		simu_opts->numKD = 0;
		simu_opts->KDID = (int *)calloc(20, sizeof(int));
		simu_opts->numOE = 0;
		simu_opts->OEID = (int *)calloc(20, sizeof(int));
		simu_opts->OEFD = (double *)calloc(20, sizeof(double));
		for (i = 0; i < 20; i++){
			simu_opts->OEFD[i] = 1.0;
		}
		simu_opts->numDE = 0;
		simu_opts->DEID = (int *)calloc(20, sizeof(int));
		simu_opts->DEFD = (double *)calloc(20, sizeof(double));
		for (i = 0; i < 20; i++){
			simu_opts->DEFD[i] = 1.0;
		}
		simu_opts->distname = strdup("Uniform");
		simu_opts->dist = 1;
		simu_opts->SF = 1.0;
		simu_opts->num_findT = 10000;
		simu_opts->num_paras = 100;
		simu_opts->num_ode = 100;
		simu_opts->num_stability = 10;
		simu_opts->thrd = 1.0;
		simu_opts->Toggle_f_p = 1;
		simu_opts->stepsize = 0.1;
		simu_opts->maxiters = 20;
		simu_opts->Toggle_T_test = 1;
		simu_opts->SBML_model = 0;
		simu_opts->myseed = 1;
		simu_opts->minP = 1.0;
		simu_opts->maxP = 100.0;
		simu_opts->minK = 0.1;
		simu_opts->maxK = 1.0;
		simu_opts->minN = 1.0;
		simu_opts->maxN = 6.0;
		simu_opts->minF = 1.0;
		simu_opts->maxF = 100.0;
		if (argc == 2){
			printf("### Warning: Uniform distribution is used.\n");
		}
	}
	// Set up the customized options
	if (argc >= 3){
		// options come strictly in "-name value" pairs
		if ((argc - 2)%2 != 0){
			printf("### Wrong: No enough input arguments!\n");
			exit(8);
		}
		for (i = 2; i < argc; i = i + 2){
			if (strcmp(argv[i], "-dist") == 0){
				switch (atoi(argv[i+1])) {
				case 1 :
					printf("Uniform distribution is used.\n");
					simu_opts->distname = strdup("Uniform");
					simu_opts->dist = 1;
					break;
				case 2 :
					printf("Guassion distribution is used.\n");
					simu_opts->distname = strdup("Guassian");
					simu_opts->dist = 2;
					break;
				case 3 :
					printf("Exponential distribution is used.\n");
					simu_opts->distname = strdup("Exponential");
					simu_opts->dist = 3;
					break;
				default:
					printf("The distribution you selected is not recognized! Please select the follwing distribution:\n");
					printf("1 ---> Uniform Distribution\n");
					printf("2 ---> Guassian Distribution\n");
					printf("3 ---> Exponential Distribution\n");
					exit(3);
				}
			}
			else if (strcmp(argv[i], "-SF") == 0){
				simu_opts->SF = atof(argv[i+1]);
				printf("### Warning: Hill coefficients will not be scaled!\n");
			}
			else if (strcmp(argv[i], "-solver") == 0){
				simu_opts->solver = atoi(argv[i+1]);
				if (simu_opts->solver == 1) {
					printf("1st Euler method is used to solve ODEs.\n");
				}
				else if (simu_opts->solver == 2) {
					printf("RK45 method is used to solve ODEs.\n");
				}
				else {
					printf("### Wrong: no such options for solver!\n");
					exit(0);
				}
			}
			else if (strcmp(argv[i], "-num_findT") == 0){
				simu_opts->num_findT = atoi(argv[i+1]);
			}
			else if (strcmp(argv[i], "-num_paras") == 0){
				simu_opts->num_paras = atoi(argv[i+1]);
			}
			else if (strcmp(argv[i], "-num_ode") == 0){
				simu_opts->num_ode = atoi(argv[i+1]);
			}
			else if (strcmp(argv[i], "-num_stability") == 0){
				simu_opts->num_stability = atoi(argv[i+1]);
			}
			else if (strcmp(argv[i], "-thrd") == 0){
				simu_opts->thrd = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-Toggle_f_p") == 0){
				simu_opts->Toggle_f_p = atoi(argv[i+1]);
			}
			else if (strcmp(argv[i], "-stepsize") == 0){
				simu_opts->stepsize = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-maxiters") == 0){
				simu_opts->maxiters = atoi(argv[i+1]);
			}
			else if (strcmp(argv[i], "-Toggle_T_test") == 0){
				simu_opts->Toggle_T_test = atoi(argv[i+1]);
			}
			else if (strcmp(argv[i], "-SBML_model") == 0){
				simu_opts->SBML_model = atoi(argv[i+1]);
			}
			else if (strcmp(argv[i], "-KDID") == 0){
				// cfg input: first -KDID on the command line replaces the
				// KD list loaded from the cfg file
				if (simu_opts->exts == 1 && KDmarker == 0){
					KDmarker = 1;
					simu_opts->numKD = 0;
					simu_opts->KDID = (int *)calloc(20, sizeof(int));
				}
				simu_opts->KDID[simu_opts->numKD] = atoi(argv[i+1]);
				simu_opts->numKD = simu_opts->numKD + 1;
			}
			else if (strcmp(argv[i], "-OEID") == 0){
				if (simu_opts->exts == 1 && OEmarker == 0){
					OEmarker = 1;
					simu_opts->numOE = 0;
					simu_opts->OEID = (int *)calloc(20, sizeof(int));
					simu_opts->OEFD = (double *)calloc(20, sizeof(double));
					for (j = 0; j < 20; j++){
						simu_opts->OEFD[j] = 1.0;
					}
				}
				simu_opts->OEID[simu_opts->numOE] = atoi(argv[i+1]);
				simu_opts->numOE = simu_opts->numOE + 1;
			}
			else if (strcmp(argv[i], "-OEFD") == 0){
				// NOTE(review): indexes numOE-1, so -OEFD given before any
				// -OEID writes to index -1 (UB) — usage requires -OEID first
				simu_opts->OEFD[simu_opts->numOE-1] = atof(argv[i+1]);
				// for (j = 0; j < 20; j++){
				// printf("%f\t", simu_opts->OEFD[j]);
				// }
				// printf("\n");
			}
			else if (strcmp(argv[i], "-DEID") == 0){
				if (simu_opts->exts == 1 && DEmarker == 0){
					DEmarker = 1;
					simu_opts->numDE = 0;
					simu_opts->DEID = (int *)calloc(20, sizeof(int));
					simu_opts->DEFD = (double *)calloc(20, sizeof(double));
					for (j = 0; j < 20; j++){
						simu_opts->DEFD[j] = 1.0;
					}
				}
				simu_opts->DEID[simu_opts->numDE] = atoi(argv[i+1]);
				simu_opts->numDE = simu_opts->numDE + 1;
			}
			else if (strcmp(argv[i], "-DEFD") == 0){
				// NOTE(review): same -1 indexing hazard as -OEFD above
				simu_opts->DEFD[simu_opts->numDE-1] = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-flag") == 0){
				simu_opts->flag = atoi(argv[i+1]);
			}
			else if (strcmp(argv[i], "-maxtime") == 0){
				simu_opts->maxtime = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-seed") == 0){
				simu_opts->myseed = strtoull(argv[i+1], &end, 10);
			}
			else if (strcmp(argv[i], "-minP") == 0){
				simu_opts->minP = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-maxP") == 0){
				simu_opts->maxP = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-minK") == 0){
				simu_opts->minK = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-maxK") == 0){
				simu_opts->maxK = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-minN") == 0){
				simu_opts->minN = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-maxN") == 0){
				simu_opts->maxN = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-minF") == 0){
				simu_opts->minF = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-maxF") == 0){
				simu_opts->maxF = atof(argv[i+1]);
			}
			else if (strcmp(argv[i], "-threads") == 0){
				// NOTE(review): atof for an int-valued option — the double is
				// truncated on assignment to int; atoi would be clearer
				threads = atof(argv[i+1]);
			}
			else{
				printf("### Wrong: Can not recognize the input arguments.\n");
				printf("Please use one of the follwing:\n");
				printf("-h : Show all available options.\n");
				printf("-maxtime : Maximum time for the simulation (Default 23.5 h).\n");
				printf("-threads : Number of threads to be used for the simulation (Default 3).\n");
				printf("-solver : The integrator method (1 --> Euler or 2 --> RK45) to solve the ODEs equations (Default 1).\n");
				printf("-flag : run RACIPE to produce the .cfg file only or the whole simulation (Default 0, perform the whole simulation).\n");
				printf("-KDID : Gene or link (See their ID in the .cfg file) to be knocked down.\n");
				printf("-OEID : Gene (See their ID in the .cfg file) to be overexpressed. (follow by -OEFD).\n");
				printf("-OEFD : Fold change to overexpress a gene (-OEID must be first set in the option, the value need to be bigger than 1). (Default 1) if the corresponding OEFD is not set, it will be set to 1.\n");
				printf("-DEID : Gene (See their ID in the .cfg file) to be downexpressed. (follow by -DEFD).\n");
				printf("-DEFD : Fold change to downexpress a gene (-DEID must be first set in the option, the value need to be bigger than 1). (Default 1) if the corresponding DEFD is not set, it will be set to 1.\n");
				printf("-dist : Distribution used for randomization\n");
				printf("            1 ---> Uniform Distribution (Default)\n");
				printf("            2 ---> Guassian Distribution\n");
				printf("            3 ---> Exponential Distribution\n");
				printf("-SF : Scale the distribution ranges of all the parameters except for the hill coefficients, should be smaller than 1 (Default 1).\n");
				printf("-num_findT : The number of simulations used to estimate the thresholds (Default 10000).\n");
				printf("-num_paras : The number of RACIPE models to generate (Default 100).\n");
				printf("-num_ode : The number of Random initial values to solve ODEs (Default 100).\n");
				printf("-num_stability : The maximum number of stable states to save for one RACIPE model (Default 10).\n");
				printf("-thrd : Cutoff for convergence of steady states for numerically solving ODEs (Default 1.0).\n");
				printf("-Toggle_f_p : Save parameters of each RACIPE model or not (Default 1 (yes)).\n");
				printf("-stepsize : Stepsize for solving ODEs (Default 0.1).\n");
				printf("-maxiters : The maximum number of iterations for solving ODEs at each random initial condition times 1000 (Default 20).\n");
				printf("-Toggle_T_test : Test the threshold assumption or not (Default 1 (yes)).\n");
				printf("-SBML_model : Output a model in the SBML format. The parameter will be the ID of the model (start from 1) to save (Default 0 (no SBML output)).\n");
				printf("-seed : random seed (Default 1).\n");
				printf("-minP : Minimum production rate (Default 1.0)\n");
				printf("-maxP : Maximum production rate (Default 100.0)\n");
				printf("-minK : Minimum degradation rate (Default 0.1)\n");
				printf("-maxK : Maximum degradation rate (Default 1.0)\n");
				printf("-minN : Minimum Hill coefficient (Default 1.0)\n");
				printf("-maxN : Maximum Hill coefficient (Default 6.0)\n");
				printf("-minF : Minimum fold change (Default 1.0)\n");
				printf("-maxF : Maximum fold change (Default 100.0)\n");
				exit(6);
			}
		}
	}
}
// Append the knockdown (_KD), overexpression (_OE) and down-expression (_DE)
// gene-ID tags to a file-name buffer. Factored out because the .cfg and .prs
// file names are built with exactly the same suffix logic.
static void append_perturbation_tags(char *name, struct opts *simu_opts)
{
    int i = 0;
    char IDname[100] = "";
    if (simu_opts->numKD != 0){
        strcat(name, "_KD");
        for (i = 0; i < simu_opts->numKD; i++){
            sprintf(IDname, "%d", simu_opts->KDID[i]);
            strcat(name, "_");
            strcat(name, IDname);
        }
    }
    if (simu_opts->numOE != 0){
        strcat(name, "_OE");
        for (i = 0; i < simu_opts->numOE; i++){
            sprintf(IDname, "%d", simu_opts->OEID[i]);
            strcat(name, "_");
            strcat(name, IDname);
        }
    }
    if (simu_opts->numDE != 0){
        strcat(name, "_DE");
        for (i = 0; i < simu_opts->numDE; i++){
            sprintf(IDname, "%d", simu_opts->DEID[i]);
            strcat(name, "_");
            strcat(name, IDname);
        }
    }
}
// Generate the models, parameter ranges for randomization and the configure file.
// - Writes the simulation settings (and, for a fresh run, the transformed
//   topology) into <model>[tags].cfg, or <model>_tmp.cfg when resuming.
// - Writes the randomization ranges into <model>[tags].prs.
// - Initializes the scratch result struct *tmprlt for a fresh run.
void Model_generate (char *inputfile, struct topo *topoinfo, struct opts *simu_opts, struct rlt *tmprlt)
{
    // Section 1 parameters .cfg
    FILE *fin = NULL;
    FILE *fout = NULL;
    char configname[100] = "";
    // Section 2 parameters .prs
    FILE *fparas = NULL;
    char parasname[100] = "";
    int i = 0;
    // Section 1 --- Printout the number of genes involved and transform the topo file
    if (simu_opts->exts == 0){
        fin = fopen(inputfile, "r");
        if (fin == NULL){
            printf("### Wrong: No topology files are found!\n");
            exit(2);
        }
        strcpy(configname, topoinfo->modelname);
        append_perturbation_tags(configname, simu_opts);
        strcat(configname, ".cfg");
        fout = fopen(configname, "w");
    }
    else{
        strcpy(configname, topoinfo->modelname);
        strcat(configname, "_tmp.cfg");
        fout = fopen(configname, "w");
        printf("### Warning: Current configure information is stored in %s file\n", configname);
    }
    if (fout == NULL){
        // fopen(..., "w") can fail (permissions, bad path); the original code
        // dereferenced the NULL stream in the fprintf calls below.
        printf("### Wrong: Cannot create configure file %s!\n", configname);
        exit(2);
    }
    // Parameters for the simulations, stored in .cfg file or _tmp.cfg file
    fprintf(fout, "Distribution\t%d\t%s\n", simu_opts->dist, simu_opts->distname);
    fprintf(fout, "ScaleFactor\t%f\n", simu_opts->SF);
    fprintf(fout, "NumberOfSimulationToFindThreshold\t%d\n", simu_opts->num_findT);
    fprintf(fout, "NumberOfRACIPEModels\t%d\n", simu_opts->num_paras);
    fprintf(fout, "NumberOfRIVs\t%d\n", simu_opts->num_ode);
    fprintf(fout, "NumberOfStatesToStore\t%d\n", simu_opts->num_stability);
    fprintf(fout, "ThresholdForConvergence\t%f\n", simu_opts->thrd);
    fprintf(fout, "ToggleOfSaveParameters\t%d\n", simu_opts->Toggle_f_p);
    fprintf(fout, "Stepsize\t%f\n", simu_opts->stepsize);
    fprintf(fout, "MaximumOfIterations\t%d\n", simu_opts->maxiters);
    fprintf(fout, "TestThreshold\t%d\n", simu_opts->Toggle_T_test);
    fprintf(fout, "number_of_KDs\t%d\n", simu_opts->numKD);
    fprintf(fout, "number_of_OEs\t%d\n", simu_opts->numOE);
    fprintf(fout, "number_of_DEs\t%d\n", simu_opts->numDE);
    if (simu_opts->numKD == 0){
        fprintf(fout, "KD_ID\t%d\n", 0);
    }
    else{
        fprintf(fout, "KD_ID");
        for (i = 0; i < simu_opts->numKD; i++){
            fprintf(fout, "\t%d", simu_opts->KDID[i]);
        }
        fprintf(fout, "\n");
    }
    if (simu_opts->numOE == 0){
        fprintf(fout, "OE_ID\t%d\n", 0);
        fprintf(fout, "OE_Fold_Change\t%f\n", 0.0);
    }
    else{
        fprintf(fout, "OE_ID");
        for (i = 0; i < simu_opts->numOE; i++){
            fprintf(fout, "\t%d", simu_opts->OEID[i]);
        }
        fprintf(fout, "\n");
        fprintf(fout, "OE_Fold_Change");
        for (i = 0; i < simu_opts->numOE; i++){
            fprintf(fout, "\t%f", simu_opts->OEFD[i]);
        }
        fprintf(fout, "\n");
    }
    if (simu_opts->numDE == 0){
        fprintf(fout, "DE_ID\t%d\n", 0);
        fprintf(fout, "DE_Fold_Change\t%f\n", 0.0);
    }
    else{
        fprintf(fout, "DE_ID");
        for (i = 0; i < simu_opts->numDE; i++){
            fprintf(fout, "\t%d", simu_opts->DEID[i]);
        }
        fprintf(fout, "\n");
        fprintf(fout, "DE_Fold_Change");
        for (i = 0; i < simu_opts->numDE; i++){
            fprintf(fout, "\t%f", simu_opts->DEFD[i]);
        }
        fprintf(fout, "\n");
    }
    fprintf(fout, "MaximumRunningTime\t%f\n", simu_opts->maxtime);
    fprintf(fout, "Seed\t%lld\n", simu_opts->myseed);
    fprintf(fout, "minP\t%f\n", simu_opts->minP);
    fprintf(fout, "maxP\t%f\n", simu_opts->maxP);
    fprintf(fout, "minK\t%f\n", simu_opts->minK);
    fprintf(fout, "maxK\t%f\n", simu_opts->maxK);
    fprintf(fout, "minN\t%f\n", simu_opts->minN);
    fprintf(fout, "maxN\t%f\n", simu_opts->maxN);
    fprintf(fout, "minF\t%f\n", simu_opts->minF);
    fprintf(fout, "maxF\t%f\n", simu_opts->maxF);
    if (simu_opts->exts == 0){
        check_topo(fin, fout, topoinfo); // read in the topo file
        fclose(fin);
        // initial struct rlt *tmprlt
        tmprlt->Nstb = 0;
        tmprlt->numover = (int *) calloc(topoinfo->numG, sizeof(int));
        tmprlt->numdown = (int *) calloc(topoinfo->numG, sizeof(int));
        tmprlt->cnt_store = (int *) calloc(simu_opts->num_stability, sizeof(int));
        tmprlt->y_store = (double *) calloc(simu_opts->num_ode*topoinfo->numG, sizeof(double));
        tmprlt->soln = (double *) calloc(simu_opts->num_stability*topoinfo->numG, sizeof(double));
        tmprlt->paras = (double *) calloc(3*topoinfo->numR+2*topoinfo->numG, sizeof(double));
    }
    fclose(fout);
    fin = NULL;
    fout = NULL;
    // S2 --- Generate the randomization range
    strcpy(parasname, topoinfo->modelname);
    if (simu_opts->exts == 0){
        append_perturbation_tags(parasname, simu_opts);
    }
    strcat(parasname, ".prs");
    if (simu_opts->exts == 0){
        fparas = fopen(parasname, "w+");
        if (fparas == NULL){
            printf("### Wrong: Cannot create %s file!\n", parasname);
            exit(2);
        }
        generate_random_range(fparas, topoinfo, simu_opts);
    }
    else{
        // Resuming: an existing .prs must not be silently overwritten.
        fparas = fopen(parasname, "r");
        if (fparas == NULL){
            printf("### Warning: .prs file will be regenerated according to current .cfg file\n");
            fparas = fopen(parasname, "w+");
            if (fparas == NULL){
                printf("### Wrong: Cannot create %s file!\n", parasname);
                exit(2);
            }
            generate_random_range(fparas, topoinfo, simu_opts);
        }
        else{
            printf("### Wrong: old .prs file is detected and needs to be deleted!\n");
            exit(11);
        }
    }
    fclose(fparas);
    fparas = NULL;
    if (simu_opts->flag == 1){
        printf(".cfg file is produced!\n");
        exit(10);
    }
}
// Read in the topology information.
// First pass: collect the unique gene names (order of first appearance) and
// count regulations. Second pass: translate each edge to 0-based gene indices.
// The transformed topology is echoed into f_out (the .cfg file), and the
// threshold-parameter position of each regulation is precomputed in ParasPos.
void check_topo(FILE *f_in, FILE *f_out, struct topo *topoinfo)
{
    int tmpnumG = 0;
    int tmpnumR = 0;
    int RT = 0; //regulation type
    int count = 0;
    int i = 0;
    int j = 0;
    int cntP = 0;
    int matchnum = 0;
    char G1[100] = "";
    char G2[100] = "";
    topoinfo->Gname = (char**)malloc(sizeof(char *));
    if (f_in == NULL){
        printf("Topology file is missing!\n");
        exit(1);
    }
    rewind(f_in);
    fscanf(f_in, "%*[^\n]\n"); //skip first line (header) of topo file
    // %99s bounds each name to the 100-byte buffers (the original unbounded
    // %s overflowed on gene names of 100+ characters).
    while (fscanf(f_in, "%99s\t%99s\t%d\n", G1, G2, &RT) == 3) {
        tmpnumR++;
        // Check the types of regualtions: 1 --> Activation; 2 --> Inhibition;
        if (RT != 1 && RT != 2){
            printf("ERROR: Number %d regulation is not recognized\n", tmpnumR);
            exit(1);
        }
        if (count == 0) { // second line of topo file
            if (strcmp(G1, G2) == 0) {
                // self-loop: only one new gene
                count = count + 1;
                topoinfo->Gname = (char**)realloc(topoinfo->Gname, (count)*sizeof(char *));
                topoinfo->Gname[count-1] = (char*)malloc(sizeof(G1));
                strcpy(topoinfo->Gname[count-1], G1);
                tmpnumG = tmpnumG + 1;
            }
            else {
                count = count + 2;
                topoinfo->Gname = (char**)realloc(topoinfo->Gname, (count)*sizeof(char *));
                topoinfo->Gname[count-2] = (char*)malloc(sizeof(G1));
                topoinfo->Gname[count-1] = (char*)malloc(sizeof(G2));
                strcpy(topoinfo->Gname[count-2], G1);
                strcpy(topoinfo->Gname[count-1], G2);
                tmpnumG = tmpnumG + 2;
            }
        }
        else {
            matchnum = 0;
            // compare G1 first
            for (i = 0; i < count; i++) {
                if (strcmp(G1, topoinfo->Gname[i]) == 0) {
                    matchnum = matchnum + 1;
                }
            }
            if (matchnum == 0) {
                count = count + 1;
                topoinfo->Gname = (char**)realloc(topoinfo->Gname, (count)*sizeof(char *));
                topoinfo->Gname[count-1] = (char*)malloc(sizeof(G1));
                strcpy(topoinfo->Gname[count-1], G1);
                tmpnumG = tmpnumG + 1;
            }
            if (strcmp(G1, G2) != 0) {
                matchnum = 0;
                for (i = 0; i < count; i++) {
                    if (strcmp(G2, topoinfo->Gname[i]) == 0) {
                        matchnum = matchnum + 1;
                    }
                }
                if (matchnum == 0) {
                    count = count + 1;
                    topoinfo->Gname = (char**)realloc(topoinfo->Gname, (count)*sizeof(char *));
                    topoinfo->Gname[count-1] = (char*)malloc(sizeof(G2));
                    strcpy(topoinfo->Gname[count-1], G2);
                    tmpnumG = tmpnumG + 1;
                }
            }
        }
    }
    // Screen printout
    printf("\n-------------------------------------------\n");
    printf("The topo file contains the following genes:\n");
    printf("Gene_ID -- Gene_Name\n");
    for (i = 0; i < count; i++) {
        printf("%d -- %s\n", i+1, topoinfo->Gname[i]);
    }
    printf("-------------------------------------------\n");
    topoinfo->numR = tmpnumR;
    topoinfo->numG = tmpnumG;
    printf("Total number of genes = %d\n", tmpnumG);
    printf("Total number of regulations = %d\n", tmpnumR);
    printf("-------------------------------------------\n");
    // transformation of the topo file
    topoinfo->SourceG = (int *)calloc(tmpnumR, sizeof(int));
    topoinfo->TargetG = (int *)calloc(tmpnumR, sizeof(int));
    topoinfo->TypeR = (int *)calloc(tmpnumR, sizeof(int));
    topoinfo->ParasPos = (int *)calloc(tmpnumR, sizeof(int));
    count = 0;
    rewind(f_in);
    fscanf(f_in, "%*[^\n]\n"); //skip first line of topo file
    while (fscanf(f_in, "%99s\t%99s\t%d\n", G1, G2, &RT) == 3) {
        count++;
        for (i = 0; i < tmpnumG; i++) {
            if (strcmp(G1, topoinfo->Gname[i]) == 0) {
                topoinfo->SourceG[count-1] = i;
            }
            if (strcmp(G2, topoinfo->Gname[i]) == 0) {
                topoinfo->TargetG[count-1] = i;
            }
        }
        topoinfo->TypeR[count-1] = RT;
    }
    fprintf(f_out, "NumberOfRegulations\t%d\n", topoinfo->numR);
    fprintf(f_out, "NumberOfGenes\t%d\n", topoinfo->numG);
    for (i = 0; i < topoinfo->numG; i++) {
        fprintf(f_out, "%d\t%s\n", i+1, topoinfo->Gname[i]);
    }
    for (i = 0; i < topoinfo->numR; i++){
        fprintf(f_out, "%d\t%d\t%d\t%d\n", topoinfo->numG+i+1, topoinfo->SourceG[i]+1, topoinfo->TargetG[i]+1, topoinfo->TypeR[i]);
    }
    // Parameter layout: 2*numG entries (production, degradation) come first,
    // then 3 entries per regulation; ParasPos[j] is where regulation j's
    // threshold lives, ordered by target gene.
    for(i = 0; i < topoinfo->numG; i++){
        for (j = 0; j < topoinfo->numR; j++){
            if (topoinfo->TargetG[j] == i){
                topoinfo->ParasPos[j] = 3*cntP + 2*topoinfo->numG; // Position of threshold parameters for each regulation.
                cntP = cntP + 1;
            }
        }
    }
}
// Generate the parameter ranges for randomization and .prs file.
// dist 1: uniform (writes min/max); dist 2: non-negative Gaussian (writes
// mean/std); dist 3: exponential (writes mean only). After writing, the .prs
// file is read back into topoinfo->prsrandrange for the simulation.
void generate_random_range(FILE *f_paras, struct topo *topoinfo, struct opts *simu_opts)
{
    char tmpparasname[100] = "";
    double tmpmin = 0.0;
    double tmpmax = 0.0;
    int typeR = 0;
    int num = simu_opts->num_findT;
    int dist = simu_opts->dist;
    double SF = simu_opts->SF;
    /**** Default ranges for each class of parameters ****/
    double minP_d = simu_opts->minP;
    double maxP_d = simu_opts->maxP;
    double minK_d = simu_opts->minK;
    double maxK_d = simu_opts->maxK;
    double minN_d = simu_opts->minN;
    double maxN_d = simu_opts->maxN;
    double minF_d = simu_opts->minF;
    double maxF_d = simu_opts->maxF;
    /**** Scaled ranges actually written to the .prs file ****/
    double minP = 0.0;
    double maxP = 0.0;
    double minK = 0.0;
    double maxK = 0.0;
    double minN = 0.0;
    double maxN = 0.0;
    double minF = 0.0;
    double maxF = 0.0;
    double meanP = 0.0;
    double stdP = 0.0;
    double meanK = 0.0;
    double stdK = 0.0;
    double meanF = 0.0;
    double stdF = 0.0;
    int i = 0;
    int j = 0;
    double *minT;
    double *maxT;
    minT = (double *)calloc(topoinfo->numG, sizeof(double));
    maxT = (double *)calloc(topoinfo->numG, sizeof(double));
    double *meanT;
    double *stdT;
    meanT = (double *)calloc(topoinfo->numG, sizeof(double));
    stdT = (double *)calloc(topoinfo->numG, sizeof(double));
    double *amplifyfold; // The fold changes need to applied in order to make the minT bigger than 0.01
    double tmpfold;
    amplifyfold = (double *)calloc(topoinfo->numG, sizeof(double));
    switch (dist) {
    case 1 :
        minP = ((minP_d + maxP_d) - SF * (maxP_d - minP_d))/2.0;
        maxP = ((minP_d + maxP_d) + SF * (maxP_d - minP_d))/2.0;
        minK = ((minK_d + maxK_d) - SF * (maxK_d - minK_d))/2.0;
        maxK = ((minK_d + maxK_d) + SF * (maxK_d - minK_d))/2.0;
        minN = minN_d;
        maxN = maxN_d;
        minF = ((minF_d + maxF_d) - SF * (maxF_d - minF_d))/2.0;
        maxF = ((minF_d + maxF_d) + SF * (maxF_d - minF_d))/2.0;
        fprintf(f_paras, "Parameter\tMinimum_value\tMaximum_Value\tRegulation_type\n");
        // Format of parameter files: name minV maxV type_of_regulation
        // estimate the threshold
        printf("Amplification of the parameter ranges (production rates and thresholds)\n");
        for(i = 0; i < topoinfo->numG; i++){
            // estimate threshold and printout; fills minT[i]/maxT[i]
            estimate_threshold(num, i, minP, maxP, minK, maxK, minN, maxN, minF, maxF, minT, maxT, topoinfo, dist, SF);
            if (minT[i] < 0.01){
                tmpfold = 10.0;
                while ((minT[i]*tmpfold) < 0.01){
                    tmpfold = tmpfold*10.0;
                }
                amplifyfold[i] = tmpfold;
            }
            else{
                amplifyfold[i] = 1;
            }
            printf("%s\t%f\n", topoinfo->Gname[i], amplifyfold[i]);
        }
        // production rate
        for (i = 0; i < topoinfo->numG; i++){
            fprintf(f_paras, "Prod_of_%s\t%f\t%f\t%d\n", topoinfo->Gname[i], minP*amplifyfold[i], maxP*amplifyfold[i], 0);
        }
        // degradation rate
        for (i = 0; i < topoinfo->numG; i++){
            fprintf(f_paras, "Deg_of_%s\t%f\t%f\t%d\n", topoinfo->Gname[i], minK, maxK, 0);
        }
        for(i = 0; i < topoinfo->numG; i++){
            for (j = 0; j < topoinfo->numR; j++){
                if (topoinfo->TargetG[j] == i){
                    // Threshold
                    fprintf(f_paras, "Trd_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], minT[topoinfo->SourceG[j]]*amplifyfold[topoinfo->SourceG[j]], maxT[topoinfo->SourceG[j]]*amplifyfold[topoinfo->SourceG[j]], 0);
                    // Number of binding sites
                    fprintf(f_paras, "Num_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], minN, maxN, 0);
                    // Fold change of a regulation
                    if (topoinfo->TypeR[j] == 1) { //Activation
                        fprintf(f_paras, "Act_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], minF, maxF, topoinfo->TypeR[j]);
                    }
                    else if (topoinfo->TypeR[j] == 2) { //Inhibition
                        fprintf(f_paras, "Inh_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], minF, maxF, topoinfo->TypeR[j]);
                    }
                }
            }
        }
        break;
    case 2 :
        // Format of parameter files: name meanV stdV
        meanP = (minP_d+maxP_d)/2.0;
        stdP = (maxP_d-minP_d)*SF/2.0;
        meanK = (minK_d+maxK_d)/2.0;
        stdK = (maxK_d-minK_d)*SF/2.0;
        meanF = (minF_d+maxF_d)/2.0;
        stdF = (maxF_d-minF_d)*SF/2.0;
        fprintf(f_paras, "Parameter\tMean\tStandard_deviation\tRegulation_type\n");
        printf("Amplification of the parameter ranges (production rates and thresholds)\n");
        for(i = 0; i < topoinfo->numG; i++){
            // estimate threshold and printout; fills meanT[i]/stdT[i]
            estimate_threshold(num, i, meanP, stdP, meanK, stdK, minN_d, maxN_d, meanF, stdF, meanT, stdT, topoinfo, dist, SF);
            // BUGFIX: the amplification test must use meanT[i] (what
            // estimate_threshold filled for dist==2). The original read
            // minT[i], which is never written here, so it stayed 0.0 and
            // the while loop below (0.0*tmpfold < 0.01) never terminated.
            if (meanT[i] < 0.01){
                tmpfold = 10.0;
                while ((meanT[i]*tmpfold) < 0.01){
                    tmpfold = tmpfold*10.0;
                }
                amplifyfold[i] = tmpfold;
            }
            else{
                amplifyfold[i] = 1;
            }
            printf("%s\t%f\n", topoinfo->Gname[i], amplifyfold[i]);
        }
        // production rate
        for (i = 0; i < topoinfo->numG; i++){
            fprintf(f_paras, "Prod_of_%s\t%f\t%f\t%d\n", topoinfo->Gname[i], meanP*amplifyfold[i], stdP*amplifyfold[i], 0);
        }
        // degradation rate
        for (i = 0; i < topoinfo->numG; i++){
            fprintf(f_paras, "Deg_of_%s\t%f\t%f\t%d\n", topoinfo->Gname[i], meanK, stdK, 0);
        }
        for(i = 0; i < topoinfo->numG; i++){
            for (j = 0; j < topoinfo->numR; j++){
                if (topoinfo->TargetG[j] == i){
                    // Threshold
                    fprintf(f_paras, "Trd_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], meanT[topoinfo->SourceG[j]]*amplifyfold[topoinfo->SourceG[j]], stdT[topoinfo->SourceG[j]]*amplifyfold[topoinfo->SourceG[j]], 0);
                    // Number of binding sites
                    fprintf(f_paras, "Num_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], minN_d, maxN_d, 0);
                    // Fold change of a regulation
                    if (topoinfo->TypeR[j] == 1) { //Activation
                        fprintf(f_paras, "Act_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], meanF, stdF, topoinfo->TypeR[j]);
                    }
                    else if (topoinfo->TypeR[j] == 2) { //Inhibition
                        fprintf(f_paras, "Inh_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], meanF, stdF, topoinfo->TypeR[j]);
                    }
                }
            }
        }
        break;
    case 3 :
        // Format of parameter files: name meanV stdV (std column unused)
        meanP = (minP_d+maxP_d)*SF/2.0;
        meanK = (minK_d+maxK_d)*SF/2.0;
        meanF = (minF_d+maxF_d)*SF/2.0;
        fprintf(f_paras, "Parameter\tMean\tNo_sense\tRegulation_type\n");
        printf("Amplification of the parameter ranges (production rates and thresholds)\n");
        for(i = 0; i < topoinfo->numG; i++){
            // estimate threshold and printout; fills meanT[i] (stdT[i] is 0)
            estimate_threshold(num, i, meanP, stdP, meanK, stdK, minN_d, maxN_d, meanF, stdF, meanT, stdT, topoinfo, dist, SF);
            // BUGFIX: same as case 2 — use meanT[i], not the never-written
            // minT[i], to avoid an infinite amplification loop.
            if (meanT[i] < 0.01){
                tmpfold = 10.0;
                while ((meanT[i]*tmpfold) < 0.01){
                    tmpfold = tmpfold*10.0;
                }
                amplifyfold[i] = tmpfold;
            }
            else{
                amplifyfold[i] = 1;
            }
            printf("%s\t%f\n", topoinfo->Gname[i], amplifyfold[i]);
        }
        // production rate
        for (i = 0; i < topoinfo->numG; i++){
            fprintf(f_paras, "Prod_of_%s\t%f\t%f\t%d\n", topoinfo->Gname[i], meanP*amplifyfold[i], stdP, 0);
        }
        // degradation rate
        for (i = 0; i < topoinfo->numG; i++){
            fprintf(f_paras, "Deg_of_%s\t%f\t%f\t%d\n", topoinfo->Gname[i], meanK, stdK, 0);
        }
        for(i = 0; i < topoinfo->numG; i++){
            for (j = 0; j < topoinfo->numR; j++){
                if (topoinfo->TargetG[j] == i){
                    // Threshold
                    fprintf(f_paras, "Trd_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], meanT[topoinfo->SourceG[j]]*amplifyfold[topoinfo->SourceG[j]], stdT[topoinfo->SourceG[j]], 0);
                    // Number of binding sites
                    fprintf(f_paras, "Num_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], minN_d, maxN_d, 0);
                    // Fold change of a regulation
                    if (topoinfo->TypeR[j] == 1) { //Activation
                        fprintf(f_paras, "Act_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], meanF, stdF, topoinfo->TypeR[j]);
                    }
                    else if (topoinfo->TypeR[j] == 2) { //Inhibition
                        fprintf(f_paras, "Inh_of_%sTo%s\t%f\t%f\t%d\n", topoinfo->Gname[topoinfo->SourceG[j]], topoinfo->Gname[topoinfo->TargetG[j]], meanF, stdF, topoinfo->TypeR[j]);
                    }
                }
            }
        }
        break;
    }
    // Free all scratch arrays in one place. The original freed four of them
    // inside each case (leaking everything for an unknown dist) and never
    // freed amplifyfold at all.
    free(minT);
    free(maxT);
    free(meanT);
    free(stdT);
    free(amplifyfold);
    topoinfo->prsrandrange = (double **)calloc(3, sizeof(double *));
    topoinfo->prsrandrange[0] = (double *) calloc(3*topoinfo->numR+2*topoinfo->numG, sizeof(double));
    topoinfo->prsrandrange[1] = (double *) calloc(3*topoinfo->numR+2*topoinfo->numG, sizeof(double));
    topoinfo->prsrandrange[2] = (double *) calloc(3*topoinfo->numR+2*topoinfo->numG, sizeof(double));
    rewind(f_paras);
    fscanf(f_paras, "%*[^\n]\n"); // skip the header line written above
    i = 0;
    // %99s bounds the name to tmpparasname's 100 bytes.
    while (fscanf(f_paras, "%99s\t%lf\t%lf\t%d", tmpparasname, &tmpmin, &tmpmax, &typeR) == 4){
        topoinfo->prsrandrange[0][i] = tmpmin;
        topoinfo->prsrandrange[1][i] = tmpmax;
        topoinfo->prsrandrange[2][i] = (double)typeR;
        i++;
    }
}
// Estimate the threshold range for randomization, called in generate_random_range.
// Monte-Carlo estimate: sample `num` steady-state levels g/k for a standalone
// gene (median MA), then sample the target gene's level modulated by its
// incoming activations/inhibitions (median MB). The result written into
// minT[ID]/maxT[ID] depends on dist: range (1), mean/std (2), or mean (3).
// The exact sequence of RNG calls is part of the reproducible behavior and
// must not be reordered.
void estimate_threshold(int num, int ID, double minP, double maxP, double minK, double maxK, double minN, double maxN, double minF, double maxF, double *minT, double *maxT, struct topo *topoinfo, int dist, double SF)
{
    int i = 0;
    int j = 0;
    int numA = 0; // incoming activations on gene ID
    int numI = 0; // incoming inhibitions on gene ID
    double g = 0.0;
    double k = 0.0;
    double T = 0.0;
    double n = 0.0;
    double lambda = 0.0;
    double MA = 0.0;
    double MB = 0.0;
    double f1 = 0.0;
    double f2 = 0.0;
    double *A; // A is a standalone gene
    double *B;
    A = (double *)calloc(num, sizeof(double));
    B = (double *)calloc(num, sizeof(double));
    for (i = 0; i < topoinfo->numR; i++){
        if (topoinfo->TargetG[i] == ID){
            if (topoinfo->TypeR[i] == 1){ //Activation
                numA = numA + 1;
            }
            else if (topoinfo->TypeR[i] == 2){ //Inhibition
                numI = numI + 1;
            }
        }
    }
    // 1.96 ~ 95% interval half-width in std units, scaled by SF
    f1 = (2.0 - SF*1.96)/2.0;
    f2 = (2.0 + SF*1.96)/2.0;
    switch (dist){
    case 1: // Uniform distribution
        for (i = 0; i < num; i++){
            g = randu(minP, maxP);
            k = randu(minK, maxK);
            A[i] = g/k;
        }
        MA = median(A, num);
        for (i = 0; i < num; i++){
            g = randu(minP, maxP);
            k = randu(minK, maxK);
            B[i] = g/k;
            if (numA != 0){
                for (j = 0; j < numA; j++){
                    g = randu(minP, maxP);
                    k = randu(minK, maxK);
                    n = randd(minN, maxN, dist);
                    T = randu(MA*f1, MA*f2);
                    lambda = randu(minF, maxF);
                    B[i] = B[i]*Hillshift(g/k, T, n, lambda)/lambda;
                }
            }
            if (numI != 0){
                for (j = 0; j < numI; j++){
                    g = randu(minP, maxP);
                    k = randu(minK, maxK);
                    n = randd(minN, maxN, dist);
                    T = randu(MA*f1, MA*f2);
                    lambda = 1.0/randu(minF, maxF);
                    B[i] = B[i]*Hillshift(g/k, T, n, lambda);
                }
            }
        }
        MB = median(B, num);
        minT[ID] = MB*f1;
        maxT[ID] = MB*f2;
        break;
    case 2: // non-negative Guassian distribution
        for (i = 0; i < num; i++){
            g = randpg(minP, maxP);
            k = randpg(minK, maxK);
            A[i] = g/k;
        }
        MA = median(A, num);
        for (i = 0; i < num; i++){
            g = randpg(minP, maxP);
            k = randpg(minK, maxK);
            B[i] = g/k;
            if (numA != 0){
                for (j = 0; j < numA; j++){
                    g = randpg(minP, maxP);
                    k = randpg(minK, maxK);
                    n = randd(minN, maxN, dist);
                    T = randpg(MA, (MA*f2-MA*f1)/2.0);
                    lambda = randfd(minF, maxF, dist);
                    B[i] = B[i]*Hillshift(g/k, T, n, lambda)/lambda;
                }
            }
            if (numI != 0){
                for (j = 0; j < numI; j++){
                    g = randpg(minP, maxP);
                    k = randpg(minK, maxK);
                    n = randd(minN, maxN, dist);
                    T = randpg(MA, (MA*f2-MA*f1)/2.0);
                    lambda = 1.0/randfd(minF, maxF, dist);
                    B[i] = B[i]*Hillshift(g/k, T, n, lambda);
                }
            }
        }
        MB = median(B, num);
        minT[ID] = MB; //mean
        maxT[ID] = (MB*f2-MB*f1)/2.0; //standard deviation
        break;
    case 3: // Exponential distribution
        for (i = 0; i < num; i++){
            g = randexp(minP);
            k = randexp(minK);
            A[i] = g/k;
        }
        MA = median(A, num);
        for (i = 0; i < num; i++){
            g = randexp(minP);
            k = randexp(minK);
            B[i] = g/k;
            if (numA != 0){
                for (j = 0; j < numA; j++){
                    g = randexp(minP);
                    k = randexp(minK);
                    n = randd(minN, maxN, dist);
                    T = randexp(MA);
                    lambda = randfd(minF, 0, dist);
                    B[i] = B[i]*Hillshift(g/k, T, n, lambda)/lambda;
                }
            }
            if (numI != 0){
                for (j = 0; j < numI; j++){
                    g = randexp(minP);
                    k = randexp(minK);
                    n = randd(minN, maxN, dist);
                    T = randexp(MA);
                    lambda = 1.0/randfd(minF, 0, dist);
                    B[i] = B[i]*Hillshift(g/k, T, n, lambda);
                }
            }
        }
        MB = median(B, num);
        minT[ID] = MB;
        maxT[ID] = 0.0;
        break;
    }
    // Freed unconditionally: the original freed inside each case and leaked
    // both arrays when dist was not 1/2/3.
    free(A);
    free(B);
}
// Read in the configure information.
// Parses <model>.cfg (written by Model_generate/check_topo) back into
// simu_opts and topoinfo, and allocates the scratch result struct *tmprlt.
// The field order here must mirror the fprintf order in Model_generate.
void read_cfg(struct topo *topoinfo, struct opts *simu_opts, struct rlt *tmprlt)
{
    int tmpID = 0;
    int cntP = 0;
    int i = 0;
    int j = 0;
    int tmpSourceG = 0;
    int tmpTargetG = 0;
    char configname[100] = "";
    strcpy(configname, topoinfo->modelname);
    strcat(configname, ".cfg");
    FILE *fcfg;
    char tmpparasname[1000] = "";
    char tmpparasname2[1000] = "";
    fcfg = fopen(configname, "r");
    if (fcfg == NULL){
        printf("No configure file provided!\n");
        exit(2);
    }
    rewind(fcfg);
    // simulation settings; %999s bounds every string read to the 1000-byte
    // buffers (the original unbounded %s could overflow on a corrupt .cfg)
    fscanf(fcfg, "%999s\t%d\t%999s\n", tmpparasname, &simu_opts->dist, tmpparasname2);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->SF);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->num_findT);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->num_paras);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->num_ode);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->num_stability);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->thrd);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->Toggle_f_p);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->stepsize);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->maxiters);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->Toggle_T_test);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->numKD);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->numOE);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->numDE);
    if (simu_opts->numKD == 0){
        simu_opts->KDID = (int *)calloc(1, sizeof(int));
        fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->KDID[0]);
    }
    else{
        simu_opts->KDID = (int *)calloc(simu_opts->numKD, sizeof(int));
        fscanf(fcfg, "%999s", tmpparasname);
        for (i = 0; i < simu_opts->numKD; i++){
            fscanf(fcfg, "%d", &simu_opts->KDID[i]);
        }
    }
    if (simu_opts->numOE == 0){
        simu_opts->OEID = (int *) calloc(1, sizeof(int));
        simu_opts->OEFD = (double *)calloc(1, sizeof(double));
        fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->OEID[0]);
        fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->OEFD[0]);
    }
    else{
        simu_opts->OEID = (int *) calloc(simu_opts->numOE, sizeof(int));
        simu_opts->OEFD = (double *)calloc(simu_opts->numOE, sizeof(double));
        fscanf(fcfg, "%999s", tmpparasname);
        for (i = 0; i < simu_opts->numOE; i++){
            fscanf(fcfg, "%d", &simu_opts->OEID[i]);
        }
        fscanf(fcfg, "%999s", tmpparasname);
        for (i = 0; i < simu_opts->numOE; i++){
            fscanf(fcfg, "%lf", &simu_opts->OEFD[i]);
        }
    }
    if (simu_opts->numDE == 0){
        simu_opts->DEID = (int *) calloc(1, sizeof(int));
        simu_opts->DEFD = (double *)calloc(1, sizeof(double));
        fscanf(fcfg, "%999s\t%d\n", tmpparasname, &simu_opts->DEID[0]);
        fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->DEFD[0]);
    }
    else{
        simu_opts->DEID = (int *) calloc(simu_opts->numDE, sizeof(int));
        simu_opts->DEFD = (double *)calloc(simu_opts->numDE, sizeof(double));
        fscanf(fcfg, "%999s", tmpparasname);
        for (i = 0; i < simu_opts->numDE; i++){
            fscanf(fcfg, "%d", &simu_opts->DEID[i]);
        }
        fscanf(fcfg, "%999s", tmpparasname);
        for (i = 0; i < simu_opts->numDE; i++){
            fscanf(fcfg, "%lf", &simu_opts->DEFD[i]);
        }
    }
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->maxtime);
    fscanf(fcfg, "%999s\t%lld\n", tmpparasname, &simu_opts->myseed);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->minP);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->maxP);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->minK);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->maxK);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->minN);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->maxN);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->minF);
    fscanf(fcfg, "%999s\t%lf\n", tmpparasname, &simu_opts->maxF);
    simu_opts->distname = strdup(tmpparasname2);
    simu_opts->flag = 0;
    // topology information
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &topoinfo->numR);
    fscanf(fcfg, "%999s\t%d\n", tmpparasname, &topoinfo->numG);
    topoinfo->SourceG = (int *) calloc(topoinfo->numR, sizeof(int));
    topoinfo->TargetG = (int *) calloc(topoinfo->numR, sizeof(int));
    topoinfo->TypeR = (int *) calloc(topoinfo->numR, sizeof(int));
    topoinfo->ParasPos = (int *) calloc(topoinfo->numR, sizeof(int));
    topoinfo->Gname = (char**) calloc(topoinfo->numG, sizeof(char *));
    for (i = 0; i < topoinfo->numG; i++){
        topoinfo->Gname[i] = (char*) calloc(100, sizeof(char));
    }
    tmprlt->Nstb = 0;
    tmprlt->numover = (int *) calloc(topoinfo->numG, sizeof(int));
    tmprlt->numdown = (int *) calloc(topoinfo->numG, sizeof(int));
    tmprlt->cnt_store = (int *) calloc(simu_opts->num_stability, sizeof(int));
    tmprlt->y_store = (double *) calloc(simu_opts->num_ode*topoinfo->numG, sizeof(double));
    tmprlt->soln = (double *) calloc(simu_opts->num_stability*topoinfo->numG, sizeof(double));
    tmprlt->paras = (double *) calloc(3*topoinfo->numR+2*topoinfo->numG, sizeof(double));
    for (i = 0; i < topoinfo->numG; i++) {
        // gene names are 100-byte buffers, so bound with %99s
        fscanf(fcfg, "%d\t%99s\n", &tmpID, topoinfo->Gname[i]);
    }
    for (i = 0; i < topoinfo->numR; i++) {
        fscanf(fcfg, "%d\t%d\t%d\t%d\n", &tmpID, &tmpSourceG, &tmpTargetG, topoinfo->TypeR + i);
        topoinfo->SourceG[i] = tmpSourceG - 1; // .cfg stores 1-based IDs
        topoinfo->TargetG[i] = tmpTargetG - 1;
    }
    // Recompute each regulation's threshold-parameter position (same layout
    // as check_topo: 2*numG leading entries, then 3 per regulation).
    for(i = 0; i < topoinfo->numG; i++){
        for (j = 0; j < topoinfo->numR; j++){
            if (topoinfo->TargetG[j] == i){
                topoinfo->ParasPos[j] = 3*cntP + 2*topoinfo->numG; // Position of threshold parameters for each regulation.
                cntP = cntP + 1;
            }
        }
    }
}
/*********RACIPE Functions*********/
// Run the full RACIPE ensemble: randomize num_paras models in parallel
// (OpenMP, one scratch rlt per thread), solve the ODEs for each, record
// per-model results inside a critical section, and accumulate ensemble
// statistics into *maintmprlt. Uses globals: threads, countstartmodels,
// countfinmodels.
void run_RACIPE(struct opts *simu_opts, struct topo *topoinfo, struct rlt *maintmprlt)
{
    double begin, end;
    double time_spent = 0.0;
    int i = 0;
    omp_set_num_threads(threads);
    begin = omp_get_wtime();
    int perc = 100;
    // Progress is reported every `step` models. Clamp to >= 1: the original
    // divided/took modulo by num_paras/perc directly, which is 0 (division
    // by zero) whenever fewer than `perc` models are requested.
    int step = simu_opts->num_paras/perc;
    if (step < 1) step = 1;
    // Seed the random value generator
    pcg32_srandom((uint64_t)(((uint64_t)time(NULL) ^ (intptr_t)&printf)*simu_opts->myseed), 54u);
    //initialize tmprlt for each thread
    struct rlt *threadtmprlt[threads];
    for(int u=0;u<threads;u++){
        threadtmprlt[u]=(struct rlt*)malloc(sizeof(struct rlt));
        threadtmprlt[u]->Nstb = 0;
        threadtmprlt[u]->numover = (int *) calloc(topoinfo->numG, sizeof(int));
        threadtmprlt[u]->numdown = (int *) calloc(topoinfo->numG, sizeof(int));
        threadtmprlt[u]->cnt_store = (int *) calloc(simu_opts->num_stability, sizeof(int));
        threadtmprlt[u]->y_store = (double *) calloc(simu_opts->num_ode*topoinfo->numG, sizeof(double));
        threadtmprlt[u]->soln = (double *) calloc(simu_opts->num_stability*topoinfo->numG, sizeof(double));
        threadtmprlt[u]->paras = (double *) calloc(3*topoinfo->numR+2*topoinfo->numG, sizeof(double));
    }
    #pragma omp parallel for
    for (i = 1; i <= simu_opts->num_paras; i++){
        int j = 0;
        //accessing thread copy of tmprlt
        struct rlt *tmprlt=threadtmprlt[omp_get_thread_num()];
        //setting stable state counts to 0 and clearing numdown/numover
        for(int u=0;u<simu_opts->num_stability;u++){
            tmprlt->cnt_store[u]=0;
        }
        for(int u=0;u<topoinfo->numG;u++){
            tmprlt->numdown[u]=0;
            tmprlt->numover[u]=0;
        }
        #pragma omp critical
        {
            countstartmodels++;
            if((countstartmodels-1)%step==0){
                printf("\r%d%% %fs",(countstartmodels/step)*(100/perc),omp_get_wtime()-begin);
                fflush(stdout);
            }
        }
        set_parameters(simu_opts, topoinfo, tmprlt);
        // integrate from each random initial condition
        for (j = 0; j < simu_opts->num_ode; j++){
            if (simu_opts->solver == 1) {
                solve_ODE_euler(j, simu_opts, topoinfo, tmprlt);
            }
            else {
                solve_ODE_rk45(j, simu_opts, topoinfo, tmprlt);
            }
        }
        #pragma omp critical
        {
            count_state (simu_opts, topoinfo, tmprlt);
            save_model_paras(simu_opts, topoinfo, tmprlt, countfinmodels+1);
            save_model_solns(simu_opts, topoinfo, tmprlt, countfinmodels+1);
            T_test (simu_opts, topoinfo, tmprlt, countfinmodels+1);
            if (simu_opts->SBML_model == i){
                export_SBML_model(simu_opts, topoinfo, tmprlt, countfinmodels+1);
            }
            countfinmodels++;
            //update maintmprlt
            for(int u=0;u<simu_opts->num_stability;u++){
                maintmprlt->cnt_store[u]+= tmprlt->cnt_store[u];
            }
            for(int u=0;u<topoinfo->numG;u++){
                maintmprlt->numover[u]+= tmprlt->numover[u];
                maintmprlt->numdown[u]+= tmprlt->numdown[u];
            }
        }
        end = omp_get_wtime();
        time_spent = end-begin;
        if (time_spent/3600 >= simu_opts->maxtime){
            printf("### Warning: Time-out!\n");
            // break; //OpenMP cannot break within a loop, exit instead.
            exit(1);
        }
    }
    //final percentage change
    printf("\r%d%% %fs",(countstartmodels/step)*(100/perc),omp_get_wtime()-begin);
    fflush(stdout);
    // Screen printout
    if (simu_opts->Toggle_T_test == 1){
        printf("\n-------------------T_test------------------\n");
        printf("Gene_ID -- Probs_over_T\n");
        for (i = 0; i < topoinfo->numG; i++){
            printf("%d -- %f\n", i+1, (double)maintmprlt->numover[i]/(double)(maintmprlt->numover[i]+maintmprlt->numdown[i]));
        }
    }
    printf("\n-----------------Stability-----------------\n");
    printf("#states -- Count\n");
    for (i = 0; i < simu_opts->num_stability; i++){
        printf("%d -- %d\n", i+1, maintmprlt->cnt_store[i]);
    }
    end = omp_get_wtime();
    time_spent = end-begin;
    printf("---> Actual running time : %f seconds ( %f hours)\n", time_spent, time_spent/3600.0);
    printf("The maximum running time is %f.\n", simu_opts->maxtime);
    //freeing memory — including each thread's scratch arrays, which the
    //original leaked (it freed only the struct itself)
    for(int u=0;u<threads;u++){
        free(threadtmprlt[u]->numover);
        free(threadtmprlt[u]->numdown);
        free(threadtmprlt[u]->cnt_store);
        free(threadtmprlt[u]->y_store);
        free(threadtmprlt[u]->soln);
        free(threadtmprlt[u]->paras);
        free(threadtmprlt[u]);
    }
    release_memory (simu_opts, topoinfo, maintmprlt);
}
// Append one model's randomized parameter set to <model>[tags]_parameters.dat
// (only when Toggle_f_p is set). The file is opened lazily on the first call
// via a static FILE*, and closed when the last model (num == num_paras) has
// been written. Callers must serialize calls (run_RACIPE invokes this inside
// an omp critical section).
void save_model_paras(struct opts *simu_opts, struct topo *topoinfo, struct rlt *tmprlt, int num)
{
    static FILE *f_p = NULL;
    char fpname [100] = "";
    int i = 0;
    int cnt = tmprlt->Nstb; // number of stable states found for this model
    char KDIDname[100] = "";
    char OEIDname[100] = "";
    char DEIDname[100] = "";
    if (simu_opts->Toggle_f_p == 1) {
        if (f_p == NULL) {
            // Build the output name with the same KD/OE/DE tags as the .cfg.
            strcpy(fpname, topoinfo->modelname);
            if (simu_opts->exts == 0){
                if (simu_opts->numKD != 0){
                    strcat(fpname, "_KD");
                    for (i = 0; i < simu_opts->numKD; i++){
                        sprintf(KDIDname, "%d", simu_opts->KDID[i]);
                        strcat(fpname, "_");
                        strcat(fpname, KDIDname);
                    }
                }
                if (simu_opts->numOE != 0){
                    strcat(fpname, "_OE");
                    for (i = 0; i < simu_opts->numOE; i++){
                        sprintf(OEIDname, "%d", simu_opts->OEID[i]);
                        strcat(fpname, "_");
                        strcat(fpname, OEIDname);
                    }
                }
                if (simu_opts->numDE != 0){
                    strcat(fpname, "_DE");
                    for (i = 0; i < simu_opts->numDE; i++){
                        sprintf(DEIDname, "%d", simu_opts->DEID[i]);
                        strcat(fpname, "_");
                        strcat(fpname, DEIDname);
                    }
                }
            }
            strcat(fpname, "_parameters");
            strcat(fpname, ".dat");
            f_p = fopen(fpname,"w");
            if (f_p == NULL){
                // The original dereferenced the NULL stream in fprintf below.
                printf("### Wrong: Cannot create parameter file %s!\n", fpname);
                exit(2);
            }
        }
        // row: model index, #stable states, then the full parameter vector
        fprintf(f_p, "%d\t%d", num, cnt);
        for (i = 0; i < 3*topoinfo->numR+2*topoinfo->numG; i++){
            fprintf(f_p, "\t%f", tmprlt->paras[i]);
        }
        fprintf(f_p, "\n");
        if (simu_opts->num_paras == num){
            fclose(f_p);
            f_p = NULL;
        }
    }
}
/* Compose "<modelname>[_KD_..][_OE_..][_DE_..]" into dst (the common stem of
 * every solution file).  dst must hold at least 100 bytes. */
static void solns_base_name(char *dst, struct opts *simu_opts, struct topo *topoinfo)
{
    char idname[100] = "";
    int i;
    strcpy(dst, topoinfo->modelname);
    if (simu_opts->exts != 0) {
        return;   /* external mode: no perturbation suffixes */
    }
    if (simu_opts->numKD != 0) {
        strcat(dst, "_KD");
        for (i = 0; i < simu_opts->numKD; i++) {
            sprintf(idname, "%d", simu_opts->KDID[i]);
            strcat(dst, "_");
            strcat(dst, idname);
        }
    }
    if (simu_opts->numOE != 0) {
        strcat(dst, "_OE");
        for (i = 0; i < simu_opts->numOE; i++) {
            sprintf(idname, "%d", simu_opts->OEID[i]);
            strcat(dst, "_");
            strcat(dst, idname);
        }
    }
    if (simu_opts->numDE != 0) {
        strcat(dst, "_DE");
        for (i = 0; i < simu_opts->numDE; i++) {
            sprintf(idname, "%d", simu_opts->DEID[i]);
            strcat(dst, "_");
            strcat(dst, idname);
        }
    }
}
/* Write the stable-state solutions of model 'num' both to a per-stability-class
 * file "<stem>_solution_<cnt>.dat" (one line per model, all states on it) and
 * to the combined "<stem>_solution.dat" (one line per state).  Streams are
 * opened lazily, kept in a static array, and all closed after the last model. */
void save_model_solns(struct opts *simu_opts, struct topo *topoinfo, struct rlt *tmprlt, int num)
{
    static FILE **f_s = NULL;          /* [0..n-1]: per-class streams, [n]: combined stream */
    int cnt = tmprlt->Nstb;            /* stability class of this model */
    int n = simu_opts->num_stability;  /* index of the combined stream */
    /* bug fix: the original VLA was [num_stability][100] but slot [n] was
     * written too, an out-of-bounds stack write; size must be n+1 rows */
    char fsname[simu_opts->num_stability + 1][100];
    char tmpparasname[100] = "";
    int h = 0;
    int h2 = 0;
    if (1 == num) {
        f_s = (FILE **) calloc (simu_opts->num_stability + 1, sizeof(FILE *));
    }
    if (f_s == NULL) {
        return;   /* allocation failed (or called before the first model) */
    }
    if (f_s[cnt-1] == NULL) {
        sprintf(tmpparasname, "%d", cnt);
        solns_base_name(fsname[cnt-1], simu_opts, topoinfo);
        strcat(fsname[cnt-1], "_solution");
        strcat(fsname[cnt-1], "_");
        strcat(fsname[cnt-1], tmpparasname);
        strcat(fsname[cnt-1], ".dat");
        f_s[cnt-1] = fopen(fsname[cnt-1], "w");
    }
    if (f_s[n] == NULL) {
        solns_base_name(fsname[n], simu_opts, topoinfo);
        strcat(fsname[n], "_solution");
        strcat(fsname[n], ".dat");
        f_s[n] = fopen(fsname[n], "w");
    }
    if (f_s[cnt-1] == NULL || f_s[n] == NULL) {
        return;   /* fopen failed: skip rather than fprintf(NULL, ...) (UB) */
    }
    fprintf(f_s[cnt-1], "%d\t%d", num, cnt);
    fprintf(f_s[n], "%d\t%d", num, cnt);
    /* expression levels are written in log2 scale */
    for (h = 0; h < cnt; h++) {
        for (h2 = 1; h2 <= topoinfo->numG; h2++) {
            fprintf(f_s[cnt-1], "\t%f", log2(tmprlt->soln[topoinfo->numG*h + h2 - 1]));
            fprintf(f_s[n], "\t%f", log2(tmprlt->soln[topoinfo->numG*h + h2 - 1]));
        }
        /* combined file: each additional state starts a new prefixed line */
        if (h != cnt-1) {
            fprintf(f_s[n], "\n");
            fprintf(f_s[n], "%d\t%d", num, cnt);
        }
    }
    fprintf(f_s[cnt-1], "\n");
    fprintf(f_s[n], "\n");
    if (simu_opts->num_paras == num) {
        for (h = 0; h < simu_opts->num_stability + 1; h++) {
            if (f_s[h] != NULL) {
                fclose(f_s[h]);
            }
        }
        free(f_s);
        f_s = NULL;   /* guard against use after the last model */
    }
}
/* Export each stable state of one model as a standalone SBML Level 2 file
 * "<stem>_sbml_<modelID>_<state>.xml".  The file encodes the shifted-Hill
 * regulation as a user-defined MathML function, the state's expression levels
 * as initial amounts, the sampled parameters, and one production-degradation
 * reaction per gene. */
void export_SBML_model (struct opts *simu_opts, struct topo *topoinfo, struct rlt *tmprlt, int modelID)
{
    FILE *f_sbml = NULL;
    char fpname [100] = "";
    int h = 0;
    int i = 0;
    int j = 0;
    int cnt = tmprlt->Nstb;
    char KDIDname[100] = "";
    char OEIDname[100] = "";
    char DEIDname[100] = "";
    char modelIDname[100] = "";
    char nstbname[100] = "";
    /* one SBML file per stable state */
    for (h = 1; h <= cnt; h++){
        if (f_sbml == NULL) {
            /* build "<stem>_sbml_<modelID>_<h>.xml" */
            strcpy(fpname, topoinfo->modelname);
            if (simu_opts->exts == 0){
                if (simu_opts->numKD != 0){
                    strcat(fpname, "_KD");
                    for (i = 0; i < simu_opts->numKD; i++){
                        sprintf(KDIDname, "%d", simu_opts->KDID[i]);
                        strcat(fpname, "_");
                        strcat(fpname, KDIDname);
                    }
                }
                if (simu_opts->numOE != 0){
                    strcat(fpname, "_OE");
                    for (i = 0; i < simu_opts->numOE; i++){
                        sprintf(OEIDname, "%d", simu_opts->OEID[i]);
                        strcat(fpname, "_");
                        strcat(fpname, OEIDname);
                    }
                }
                if (simu_opts->numDE != 0){
                    strcat(fpname, "_DE");
                    for (i = 0; i < simu_opts->numDE; i++){
                        sprintf(DEIDname, "%d", simu_opts->DEID[i]);
                        strcat(fpname, "_");
                        strcat(fpname, DEIDname);
                    }
                }
            }
            strcat(fpname, "_sbml");
            sprintf(modelIDname, "%d", modelID);
            strcat(fpname, "_");
            strcat(fpname, modelIDname);
            sprintf(nstbname, "%d", h);
            strcat(fpname, "_");
            strcat(fpname, nstbname);
            strcat(fpname, ".xml");
            f_sbml = fopen(fpname,"w");
            if (f_sbml == NULL) {
                /* bug fix: the original wrote through a NULL stream on fopen failure (UB) */
                printf("### Wrong: cannot open %s for writing.\n", fpname);
                return;
            }
        }
        // write the header
        fprintf(f_sbml, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
        fprintf(f_sbml, "<sbml level=\"2\" version=\"3\" xmlns=\"http://www.sbml.org/sbml/level2/version3\">\n");
        fprintf(f_sbml, "\t<model name=\"No_%d_model_%d\">\n", modelID, h);
        // write the shifted-Hill function definition (lambda over x, x0, lamda, nx)
        fprintf(f_sbml, "\t\t<listOfFunctionDefinitions>\n");
        fprintf(f_sbml, "\t\t\t<functionDefinition id=\"hillfunction\">\n");
        fprintf(f_sbml, "\t\t\t\t<math xmlns=\"http://www.w3.org/1998/Math/MathML\"\n");
        fprintf(f_sbml, "\t\t\t\t\txmlns:sbml=\"http://www.sbml.org/sbml/level3/version2/core\">\n");
        fprintf(f_sbml, "\t\t\t\t\t<lambda>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t<bvar><ci> x </ci></bvar>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t<bvar><ci> x0 </ci></bvar>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t<bvar><ci> lamda </ci></bvar>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t<bvar><ci> nx </ci></bvar>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t<apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t<plus/>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t<ci> lamda </ci>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t<apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t<times/>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t<apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<minus/>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<cn>1.0</cn>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<ci>lamda</ci>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t</apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t<apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<divide/>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<cn>1.0</cn>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t<plus/>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t<cn>1.0</cn>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t<apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t\t<power/>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t\t<apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t\t\t<divide/>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t\t\t<ci>x</ci>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t\t\t<ci>x0</ci>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t\t</apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t\t<ci>nx</ci>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t</apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t</apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t\t</apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t\t</apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t\t</apply>\n");
        fprintf(f_sbml, "\t\t\t\t\t</lambda>\n");
        fprintf(f_sbml, "\t\t\t\t</math>\n");
        fprintf(f_sbml, "\t\t\t</functionDefinition>\n");
        fprintf(f_sbml, "\t\t</listOfFunctionDefinitions>\n");
        // write the list of species: one per gene, seeded with this state's level
        fprintf(f_sbml, "\t\t<listOfSpecies>\n");
        for (i=0; i<topoinfo->numG; i++){
            fprintf(f_sbml, "\t\t\t<species id=\"x%d\" initialAmount=\"%f\" name=\"%s\"/>\n", i, tmprlt->soln[topoinfo->numG*(h-1) + i], topoinfo->Gname[i]);
        }
        fprintf(f_sbml, "\t\t</listOfSpecies>\n");
        // write the list of parameters
        fprintf(f_sbml, "\t\t<listOfParameters>\n");
        // production
        for (i=0; i<topoinfo->numG; i++){
            fprintf(f_sbml, "\t\t\t<parameter id=\"g%d\" value=\"%f\"/>\n", i, tmprlt->paras[i]);
        }
        // degradation
        for (i=0; i<topoinfo->numG; i++){
            fprintf(f_sbml, "\t\t\t<parameter id=\"k%d\" value=\"%f\"/>\n", i, tmprlt->paras[i+topoinfo->numG]);
        }
        // Threshold
        for (i = 0; i < topoinfo->numR; i++){
            fprintf(f_sbml, "\t\t\t<parameter id=\"T%d\" value=\"%f\"/>\n", 3*i + 2*topoinfo->numG, tmprlt->paras[3*i + 2*topoinfo->numG]);
        }
        // Coefficient
        for (i = 0; i < topoinfo->numR; i++){
            fprintf(f_sbml, "\t\t\t<parameter id=\"n%d\" value=\"%f\"/>\n", 3*i + 2*topoinfo->numG + 1, tmprlt->paras[3*i + 2*topoinfo->numG + 1]);
        }
        // lambda (fold change); activation (type 1) and inhibition (type 2) were
        // two identical branches in the original -- merged here
        for (i = 0; i < topoinfo->numR; i++){
            if (topoinfo->prsrandrange[2][3*i + 2 + 2*topoinfo->numG] == 1 ||
                topoinfo->prsrandrange[2][3*i + 2 + 2*topoinfo->numG] == 2) {
                fprintf(f_sbml, "\t\t\t<parameter id=\"lambda%d\" value=\"%f\"/>\n", 3*i + 2*topoinfo->numG + 2, tmprlt->paras[3*i + 2*topoinfo->numG + 2]);
            }
        }
        fprintf(f_sbml, "\t\t</listOfParameters>\n");
        // write the list of reactions: dx_i/dt = g_i * prod(Hill terms) - k_i * x_i
        fprintf(f_sbml, "\t\t<listOfReactions>\n");
        for(i = 0; i < topoinfo->numG; i++){
            fprintf(f_sbml, "\t\t\t<reaction id=\"eq_%d\">\n", i+1);
            fprintf(f_sbml, "\t\t\t\t<listOfProducts>\n");
            fprintf(f_sbml, "\t\t\t\t\t<speciesReference species=\"x%d\" />\n", i);
            fprintf(f_sbml, "\t\t\t\t</listOfProducts>\n");
            fprintf(f_sbml, "\t\t\t\t<kineticLaw>\n");
            fprintf(f_sbml, "\t\t\t\t\t<math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n");
            fprintf(f_sbml, "\t\t\t\t\t\t<apply>\n");
            fprintf(f_sbml, "\t\t\t\t\t\t\t<minus/>\n");
            fprintf(f_sbml, "\t\t\t\t\t\t\t<apply>\n");
            fprintf(f_sbml, "\t\t\t\t\t\t\t\t<times/>\n");
            // production
            fprintf(f_sbml, "\t\t\t\t\t\t\t\t<ci>g%d</ci>\n", i);
            // regulation: multiply a (normalised) shifted-Hill term per incoming edge
            for (j = 0; j < topoinfo->numR; j++){
                if (topoinfo->TargetG[j] == i){
                    if (topoinfo->TypeR[j] == 1){ //Activation: Hill term divided by fold change
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t<apply>\n");
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<divide/>\n");
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<apply>\n");
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t<ci>hillfunction</ci>\n");
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t<ci>x%d</ci>\n", topoinfo->SourceG[j]);
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t<ci>T%d</ci>\n", 2*topoinfo->numG+3*j); //threshold
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t<ci>lambda%d</ci>\n", 2*topoinfo->numG+3*j+2); //fold change
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t\t<ci>n%d</ci>\n", 2*topoinfo->numG+3*j+1); //coefficient
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t</apply>\n");
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<ci>lambda%d</ci>\n", 2*topoinfo->numG+3*j+2); //fold change
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t</apply>\n");
                    }
                    else if (topoinfo->TypeR[j] == 2) { //Inhibition: plain shifted-Hill term
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t<apply>\n");
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<ci>hillfunction</ci>\n");
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<ci>x%d</ci>\n", topoinfo->SourceG[j]);
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<ci>T%d</ci>\n", 2*topoinfo->numG+3*j); //threshold
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<ci>lambda%d</ci>\n", 2*topoinfo->numG+3*j+2); //fold change
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t\t<ci>n%d</ci>\n", 2*topoinfo->numG+3*j+1); //coefficient
                        fprintf(f_sbml, "\t\t\t\t\t\t\t\t</apply>\n");
                    }
                }
            }
            fprintf(f_sbml, "\t\t\t\t\t\t\t</apply>\n");
            // degradation
            fprintf(f_sbml, "\t\t\t\t\t\t\t<apply>\n");
            fprintf(f_sbml, "\t\t\t\t\t\t\t<times/>\n");
            fprintf(f_sbml, "\t\t\t\t\t\t\t\t<ci>k%d</ci>\n", i);
            fprintf(f_sbml, "\t\t\t\t\t\t\t\t<ci>x%d</ci>\n", i);
            fprintf(f_sbml, "\t\t\t\t\t\t\t</apply>\n");
            fprintf(f_sbml, "\t\t\t\t\t\t</apply>\n");
            fprintf(f_sbml, "\t\t\t\t\t</math>\n");
            fprintf(f_sbml, "\t\t\t\t</kineticLaw>\n");
            fprintf(f_sbml, "\t\t\t</reaction>\n");
        }
        fprintf(f_sbml, "\t\t</listOfReactions>\n");
        // close the blocks
        fprintf(f_sbml, "\t</model>\n");
        fprintf(f_sbml, "</sbml>\n");
        fclose(f_sbml);
        f_sbml = NULL;   /* next stable state gets its own file */
    }
}
/* Sample a fresh random parameter set for one model.
 * Layout of tmprlt->paras:
 *   [0 .. numG)        production rates
 *   [numG .. 2*numG)   degradation rates
 *   per regulation i:  3*i+2*numG   threshold
 *                      3*i+2*numG+1 Hill coefficient
 *                      3*i+2*numG+2 fold change (inhibition stores 1/lambda)
 * simu_opts->dist selects the distribution family: 1 uniform (randu),
 * 2 randpg-based (presumably Gaussian -- TODO confirm), 3 exponential.
 * prsrandrange[0]/[1] hold the lower/upper sampling bounds, prsrandrange[2]
 * the regulation type (1 activation, 2 inhibition).  KD/OE/DE perturbations
 * are applied on top of the sampled values. */
void set_parameters (struct opts *simu_opts, struct topo *topoinfo, struct rlt *tmprlt)
{
    int i = 0;
    switch (simu_opts->dist) {
    case 1:   /* uniform sampling */
        // Production rate
        for (i = 0; i < topoinfo->numG; i++){
            tmprlt->paras[i] = randu(topoinfo->prsrandrange[0][i], topoinfo->prsrandrange[1][i]);
        }
        // Degradation rate
        for (i = 0; i < topoinfo->numG; i++){
            tmprlt->paras[i + topoinfo->numG] = randu(topoinfo->prsrandrange[0][i + topoinfo->numG], topoinfo->prsrandrange[1][i + topoinfo->numG]);
        }
        // Threshold
        for (i = 0; i < topoinfo->numR; i++){
            tmprlt->paras[3*i + 2*topoinfo->numG] = randu(topoinfo->prsrandrange[0][3*i + 2*topoinfo->numG], topoinfo->prsrandrange[1][3*i + 2*topoinfo->numG]);
        }
        // Hill coefficient (discrete sampler)
        for (i = 0; i < topoinfo->numR; i++){
            tmprlt->paras[3*i + 1 + 2*topoinfo->numG] = randd(topoinfo->prsrandrange[0][3*i + 1 + 2*topoinfo->numG], topoinfo->prsrandrange[1][3*i + 1 + 2*topoinfo->numG], simu_opts->dist);
        }
        // fold change: inhibition is stored as the reciprocal
        for (i = 0; i < topoinfo->numR; i++){
            if (topoinfo->prsrandrange[2][3*i + 2 + 2*topoinfo->numG] == 1) { // Activation
                tmprlt->paras[3*i + 2 + 2*topoinfo->numG] = randu(topoinfo->prsrandrange[0][3*i + 2 + 2*topoinfo->numG], topoinfo->prsrandrange[1][3*i + 2 + 2*topoinfo->numG]);
            }
            else if (topoinfo->prsrandrange[2][3*i + 2 + 2*topoinfo->numG] == 2) { // Inhibition
                tmprlt->paras[3*i + 2 + 2*topoinfo->numG] = 1.0/randu(topoinfo->prsrandrange[0][3*i + 2 + 2*topoinfo->numG], topoinfo->prsrandrange[1][3*i + 2 + 2*topoinfo->numG]);
            }
        }
        break;
    case 2:   /* randpg-based sampling */
        // Production rate
        for (i = 0; i < topoinfo->numG; i++){
            tmprlt->paras[i] = randpg(topoinfo->prsrandrange[0][i], topoinfo->prsrandrange[1][i]);
        }
        // Degradation rate
        for (i = 0; i < topoinfo->numG; i++){
            tmprlt->paras[i + topoinfo->numG] = randpg(topoinfo->prsrandrange[0][i + topoinfo->numG], topoinfo->prsrandrange[1][i + topoinfo->numG]);
        }
        // Threshold
        for (i = 0; i < topoinfo->numR; i++){
            tmprlt->paras[3*i + 2*topoinfo->numG] = randpg(topoinfo->prsrandrange[0][3*i + 2*topoinfo->numG], topoinfo->prsrandrange[1][3*i + 2*topoinfo->numG]);
        }
        // Hill coefficient
        for (i = 0; i < topoinfo->numR; i++){
            tmprlt->paras[3*i + 1 + 2*topoinfo->numG] = randd(topoinfo->prsrandrange[0][3*i + 1 + 2*topoinfo->numG], topoinfo->prsrandrange[1][3*i + 1 + 2*topoinfo->numG], simu_opts->dist);
        }
        // fold change
        for (i = 0; i < topoinfo->numR; i++){
            if (topoinfo->prsrandrange[2][3*i + 2 + 2*topoinfo->numG] == 1) { // Activation
                tmprlt->paras[3*i + 2 + 2*topoinfo->numG] = randfd(topoinfo->prsrandrange[0][3*i + 2 + 2*topoinfo->numG], topoinfo->prsrandrange[1][3*i + 2 + 2*topoinfo->numG], simu_opts->dist);
            }
            else if (topoinfo->prsrandrange[2][3*i + 2 + 2*topoinfo->numG] == 2) { // Inhibition
                tmprlt->paras[3*i + 2 + 2*topoinfo->numG] = 1.0/randfd(topoinfo->prsrandrange[0][3*i + 2 + 2*topoinfo->numG], topoinfo->prsrandrange[1][3*i + 2 + 2*topoinfo->numG], simu_opts->dist);
            }
        }
        break;
    case 3:   /* exponential sampling (only the first range entry is the rate) */
        // Production rate
        for (i = 0; i < topoinfo->numG; i++){
            tmprlt->paras[i] = randexp(topoinfo->prsrandrange[0][i]);
        }
        // Degradation rate
        for (i = 0; i < topoinfo->numG; i++){
            tmprlt->paras[i + topoinfo->numG] = randexp(topoinfo->prsrandrange[0][i + topoinfo->numG]);
        }
        // Threshold
        for (i = 0; i < topoinfo->numR; i++){
            tmprlt->paras[3*i + 2*topoinfo->numG] = randexp(topoinfo->prsrandrange[0][3*i + 2*topoinfo->numG]);
        }
        // Hill coefficient
        for (i = 0; i < topoinfo->numR; i++){
            tmprlt->paras[3*i + 1 + 2*topoinfo->numG] = randd(topoinfo->prsrandrange[0][3*i + 1 + 2*topoinfo->numG], topoinfo->prsrandrange[1][3*i + 1 + 2*topoinfo->numG], simu_opts->dist);
        }
        // fold change
        for (i = 0; i < topoinfo->numR; i++){
            if (topoinfo->prsrandrange[2][3*i + 2 + 2*topoinfo->numG] == 1) { // Activation
                tmprlt->paras[3*i + 2 + 2*topoinfo->numG] = randfd(topoinfo->prsrandrange[0][3*i + 2 + 2*topoinfo->numG], 0, simu_opts->dist);
            }
            else if (topoinfo->prsrandrange[2][3*i + 2 + 2*topoinfo->numG] == 2) { // Inhibition
                tmprlt->paras[3*i + 2 + 2*topoinfo->numG] = 1.0/randfd(topoinfo->prsrandrange[0][3*i + 2 + 2*topoinfo->numG], 0, simu_opts->dist);
            }
        }
        break;
    default:
        /* unknown distribution id: leave paras untouched */
        break;
    }
    /* knock-down: zero the gene's production rate; an ID beyond numG refers to
     * a regulation, which is neutralised by forcing its fold change to 1 */
    if (simu_opts->numKD != 0) {
        for (i = 0; i < simu_opts->numKD; i++){
            if (simu_opts->KDID[i] <= topoinfo->numG) {
                tmprlt->paras[simu_opts->KDID[i] - 1] = 0.0;
            }
            else {
                tmprlt->paras[topoinfo->ParasPos[simu_opts->KDID[i] - topoinfo->numG - 1] + 2] = 1.0;
            }
        }
    }
    /* over-expression: scale the production rate up by the given factor */
    if (simu_opts->numOE != 0) {
        for (i = 0; i < simu_opts->numOE; i++){
            if (simu_opts->OEID[i] <= topoinfo->numG) {
                tmprlt->paras[simu_opts->OEID[i] - 1] = tmprlt->paras[simu_opts->OEID[i] - 1]*simu_opts->OEFD[i];
            }
            else {
                printf("### Wrong: no gene (#%d) is selected for overexpression.\n", simu_opts->OEID[i]);
            }
        }
    }
    /* down-expression: scale the production rate down by the given factor */
    if (simu_opts->numDE != 0) {
        for (i = 0; i < simu_opts->numDE; i++){
            if (simu_opts->DEID[i] <= topoinfo->numG) {
                tmprlt->paras[simu_opts->DEID[i] - 1] = tmprlt->paras[simu_opts->DEID[i] - 1]/simu_opts->DEFD[i];
            }
            else {
                /* bug fix: the message previously said "overexpression" (copy-paste) */
                printf("### Wrong: no gene (#%d) is selected for down-expression.\n", simu_opts->DEID[i]);
            }
        }
    }
}
/* Right-hand side of the gene-circuit ODE:
 *   dy_i/dt = g_i * prod(shifted-Hill terms over incoming regulations) - k_i * y_i
 * 't' is unused (the system is autonomous) but kept for the integrator's
 * callback signature.  p holds production rates, degradation rates, then the
 * per-regulation triples addressed through topoinfo->ParasPos. */
void model_ODE(double t, double *ytmp, double *yp, double *p, struct topo *topoinfo)
{
    (void)t;   /* autonomous system */
    /* start every rate from the basal production term */
    for (int g = 0; g < topoinfo->numG; g++) {
        yp[g] = p[g];
    }
    /* fold in one regulation factor per edge */
    for (int r = 0; r < topoinfo->numR; r++) {
        int src = topoinfo->SourceG[r];
        int tgt = topoinfo->TargetG[r];
        int pp  = topoinfo->ParasPos[r];   /* threshold, coefficient, fold change */
        if (topoinfo->TypeR[r] == 1) {     /* activation: normalised by the fold change */
            yp[tgt] = yp[tgt]*(Hillshift(ytmp[src], p[pp], p[pp+1], p[pp+2])/p[pp+2]);
        } else if (topoinfo->TypeR[r] == 2) {   /* inhibition */
            yp[tgt] = yp[tgt]*Hillshift(ytmp[src], p[pp], p[pp+1], p[pp+2]);
        }
    }
    /* subtract first-order degradation */
    for (int g = 0; g < topoinfo->numG; g++) {
        yp[g] = yp[g] - p[g + topoinfo->numG]*ytmp[g];
    }
}
/* Random initial values: each gene starts at a log-uniform sample between its
 * lowest and highest achievable steady-state level (production divided by
 * degradation, with the floor further scaled by the fold changes of all
 * incoming regulations).  Knocked-down genes (zero production) are pinned to
 * zero in both y and ytmp. */
void RIVs(double *y, double *ytmp, double *p, struct topo *topoinfo)
{
    for (int g = 0; g < topoinfo->numG; g++) {
        if (p[g] == 0) {
            /* knocked-down gene: expression fixed at zero */
            y[g] = 0.0;
            ytmp[g] = 0.0;
            continue;
        }
        double lo = p[g];
        double hi = p[g];
        for (int r = 0; r < topoinfo->numR; r++) {
            if (topoinfo->TargetG[r] != g) {
                continue;
            }
            if (topoinfo->TypeR[r] == 1) {
                /* activation: the floor drops by the fold change */
                lo = lo*(1.0/p[topoinfo->ParasPos[r] + 2]);
            } else if (topoinfo->TypeR[r] == 2) {
                /* inhibition: fold change is stored as 1/lambda (see set_parameters),
                 * so multiplying also lowers the floor */
                lo = lo*(p[topoinfo->ParasPos[r] + 2]);
            }
        }
        /* convert rates to levels by dividing by the degradation rate */
        lo = lo/p[topoinfo->numG + g];
        hi = hi/p[topoinfo->numG + g];
        /* log-uniform draw in [lo, hi] */
        y[g] = exp2(randu(log2(lo), log2(hi)));
    }
}
/* Integrate the circuit ODE from one random initial condition with the
 * 1st-order (forward) Euler method until the state stops moving (fixed point)
 * or the iteration budget simu_opts->maxiters is exhausted; the final state is
 * stored in tmprlt->y_store slot j. */
void solve_ODE_euler (int j, struct opts *simu_opts, struct topo *topoinfo, struct rlt *tmprlt)
{
    int n_step = 1000;       /* Euler steps per convergence-check block */
    int i = 0;
    double testdelta = 0.0;  /* distance between consecutive states */
    double t = 0.0;
    double *y    = (double *)calloc(topoinfo->numG, sizeof(double));
    double *yp   = (double *)calloc(topoinfo->numG, sizeof(double));
    double *ytmp = (double *)calloc(topoinfo->numG, sizeof(double));
    if (y == NULL || yp == NULL || ytmp == NULL) {
        /* out of memory: leave y_store untouched (free(NULL) is a no-op) */
        free(y);
        free(yp);
        free(ytmp);
        return;
    }
    for (i = 0; i < topoinfo->numG; i++) {
        ytmp[i] = 2000.0;    /* sentinel "previous state" so the first delta test passes */
    }
    int cnt_loop = 0;
    RIVs(y, ytmp, tmprlt->paras, topoinfo);   /* draw random initial values */
    testdelta = sumdelta(y, ytmp, topoinfo->numG);
    /* note: dead locals t_start/t_stop of the original (assigned, never read)
     * and the redundant post-loop sumdelta call were removed */
    while (testdelta > 1e-7 && cnt_loop < simu_opts->maxiters) {
        cnt_loop = cnt_loop + 1;
        for (int i_step = 1; i_step <= n_step; i_step++) {
            for (i = 0; i < topoinfo->numG; i++) {
                ytmp[i] = y[i];   /* remember the previous state for the delta test */
            }
            model_ODE(t, ytmp, yp, tmprlt->paras, topoinfo);
            for (i = 0; i < topoinfo->numG; i++) {
                y[i] = ytmp[i] + yp[i]*simu_opts->stepsize;   /* explicit Euler step */
            }
            testdelta = sumdelta(y, ytmp, topoinfo->numG);
            if (testdelta < 1e-7) {
                break;   /* converged inside this block */
            }
            t = t + simu_opts->stepsize;
        }
    }
    /* store the converged (or last) state of this RIV run */
    for (i = 0; i < topoinfo->numG; i++) {
        tmprlt->y_store[topoinfo->numG*j + i] = y[i];
    }
    free(y);
    free(yp);
    free(ytmp);
}
// solve the ODE by RK-45: integrate one random initial condition with the
// Runge-Kutta-Fehlberg 4(5) adaptive integrator until the state converges to
// a fixed point or the iteration budget is exhausted; the final state goes
// into tmprlt->y_store slot j.
void solve_ODE_rk45 (int j, struct opts *simu_opts, struct topo *topoinfo, struct rlt *tmprlt)
{
    double abserr;
    double relerr;
    int flag;                 /* rkf45 status/communication flag */
    int n_step = 100;         /* sample points per 100-time-unit block */
    int i_step = 1;
    int i = 0;
    double testdelta = 0.0;   /* distance between consecutive sampled states */
    double t = 0.0;
    double t_out = 0.0;
    double t_start = 0.0;
    double t_stop = 0.0;
    double *y;
    double *yp;
    double *ytmp;
    /* machine-epsilon tolerances: the tightest request the integrator accepts */
    abserr = r4_epsilon ( ); //sqrt ( r4_epsilon ( ) );
    relerr = r4_epsilon ( ); //sqrt ( r4_epsilon ( ) );
    y = (double *)calloc(topoinfo->numG, sizeof(double));
    yp = (double *)calloc(topoinfo->numG, sizeof(double));
    ytmp = (double *)calloc(topoinfo->numG, sizeof(double));
    /* NOTE(review): calloc results are not checked before use */
    for (i = 0; i < topoinfo->numG; i++){
        ytmp[i] = 2000.0;   /* sentinel previous state so the first delta test passes */
    }
    int cnt_loop = 0;
    RIVs(y, ytmp, tmprlt->paras, topoinfo);   /* draw random initial values */
    testdelta = sumdelta(y, ytmp, topoinfo->numG);
    while (testdelta >1e-7 && cnt_loop < simu_opts->maxiters) {
        t_start = t_stop;
        t_stop = t_stop + 100;   /* integrate the next block of 100 time units */
        cnt_loop = cnt_loop + 1;
        /* prime yp with the derivative at the current state before restarting rkf45 */
        model_ODE ( t, ytmp, yp, tmprlt->paras, topoinfo);
        flag = 1;   /* 1 = (re)start the integrator from scratch */
        for ( i_step = 1; i_step <= n_step; i_step++ )
        {
            for (i = 0; i < topoinfo->numG; i++){
                ytmp[i] = y[i];   /* keep the previous sample for the convergence test */
            }
            /* evenly spaced sample times across [t_start, t_stop] */
            t = ( ( double ) ( n_step - i_step + 1 ) * t_start
                + ( double ) ( i_step - 1 ) * t_stop )
                / ( double ) ( n_step );
            t_out = ( ( double ) ( n_step - i_step ) * t_start
                + ( double ) ( i_step ) * t_stop )
                / ( double ) ( n_step );
            /* project-modified RKF45 driver: the extra paras/topoinfo arguments are
             * forwarded to model_ODE -- signature differs from the stock rkf45 */
            flag = r4_rkf45 (model_ODE, topoinfo->numG, y, yp, tmprlt->paras, topoinfo, &t, t_out, &relerr, abserr, flag );
        }
        /* converged when the state barely moved over the last sub-interval */
        testdelta = sumdelta(y, ytmp, topoinfo->numG);
    }
    /* store the final state of this RIV run */
    for (i = 0; i < topoinfo->numG; i++){
        tmprlt->y_store[topoinfo->numG*j + i] = y[i];
    }
    free(y);
    free(yp);
    free(ytmp);
}
void count_state (struct opts *simu_opts, struct topo *topoinfo, struct rlt *tmprlt)
// detect the stable states from all the solutions from different RIVs:
// greedy clustering by Euclidean distance -- a solution farther than
// simu_opts->thrd from every state found so far becomes a new state
{
    int i = 0;
    int j = 0;
    int h = 0;
    int count = 0;     /* how many known states the current solution differs from */
    int cnt = 1;       /* number of distinct states found so far */
    double delta = 0.0;
    double sumpow = 0.0;
    /* the first converged solution always seeds the first state */
    for (h = 1; h <= topoinfo->numG; h++){
        tmprlt->soln[h - 1] = tmprlt->y_store[h - 1];
    }
    for (i = 2; i <= simu_opts->num_ode; i++){
        count = 0;
        /* compare solution i against every state recorded so far */
        for (j = 1; j <= cnt; j++){
            h = 1;
            sumpow = 0.0;
            while (h <= topoinfo->numG) {
                sumpow = sumpow + pow((tmprlt->y_store[topoinfo->numG*(i-1) + h - 1] - tmprlt->soln[topoinfo->numG*(j-1) + h - 1]), 2);
                h++;
            }
            delta = sqrt(sumpow);
            if (delta > simu_opts->thrd){
                count = count + 1;
            }
        }
        /* distinct from all known states -> record a new stable state */
        if (count == cnt){
            cnt = cnt + 1;
            if (cnt <= simu_opts->num_stability) {
                for (h = 1; h <= topoinfo->numG; h++){
                    tmprlt->soln[(cnt-1)*topoinfo->numG + h - 1] = tmprlt->y_store[topoinfo->numG*(i-1) + h - 1];
                }
            }
            else{
                /* capacity reached: clamp to num_stability and stop scanning;
                 * additional distinct states are neither stored nor counted */
                cnt = simu_opts->num_stability;
                break;
            }
        }
    }
    /* histogram of multistability across models, indexed by state count */
    tmprlt->cnt_store[cnt-1] = tmprlt->cnt_store[cnt-1] + 1;
    tmprlt->Nstb = cnt;
}
/* Per-gene threshold test: for every regulation whose source is gene j, each
 * stable-state expression level of this model is compared against the sampled
 * threshold of that regulation.  The per-model over/under counts are appended
 * to "<stem>_T_test.dat" and accumulated into tmprlt->numover / numdown.
 * The stream is opened lazily and closed after the last model. */
void T_test(struct opts *simu_opts, struct topo *topoinfo, struct rlt *tmprlt, int num)
{
    static FILE *f_test = NULL;   /* persists between calls */
    char ftname[100] = "";
    char idname[100] = "";
    int i = 0;
    int j = 0;
    int h = 0;
    double tmp = 0.0;
    int *localnumover;            /* per-gene over-threshold counts, this model only */
    int *localnumdown;
    int cnt = tmprlt->Nstb;
    if (simu_opts->Toggle_T_test != 1) {
        return;                   /* T-test output disabled */
    }
    if (f_test == NULL) {
        /* build the file name: model name plus perturbed gene IDs */
        strcpy(ftname, topoinfo->modelname);
        if (simu_opts->exts == 0) {
            if (simu_opts->numKD != 0) {
                strcat(ftname, "_KD");
                for (i = 0; i < simu_opts->numKD; i++) {
                    sprintf(idname, "%d", simu_opts->KDID[i]);
                    strcat(ftname, "_");
                    strcat(ftname, idname);
                }
            }
            if (simu_opts->numOE != 0) {
                strcat(ftname, "_OE");
                for (i = 0; i < simu_opts->numOE; i++) {
                    sprintf(idname, "%d", simu_opts->OEID[i]);
                    strcat(ftname, "_");
                    strcat(ftname, idname);
                }
            }
            if (simu_opts->numDE != 0) {
                strcat(ftname, "_DE");
                for (i = 0; i < simu_opts->numDE; i++) {
                    sprintf(idname, "%d", simu_opts->DEID[i]);
                    strcat(ftname, "_");
                    strcat(ftname, idname);
                }
            }
        }
        strcat(ftname, "_T_test");
        strcat(ftname, ".dat");
        f_test = fopen(ftname, "w");
        if (f_test == NULL) {
            /* bug fix: the original wrote through a NULL stream when fopen failed (UB) */
            printf("### Wrong: cannot open %s for writing.\n", ftname);
            return;
        }
    }
    localnumover = (int *) calloc(topoinfo->numG, sizeof(int));
    localnumdown = (int *) calloc(topoinfo->numG, sizeof(int));
    if (localnumover == NULL || localnumdown == NULL) {
        /* out of memory: skip this model's tally (free(NULL) is a no-op) */
        free(localnumover);
        free(localnumdown);
        return;
    }
    for (i = 0; i < cnt; i++) {
        for (j = 0; j < topoinfo->numG; j++) {
            for (h = 0; h < topoinfo->numR; h++) {
                if (topoinfo->SourceG[h] == j) {
                    /* ratio of expression level to this regulation's threshold */
                    tmp = tmprlt->soln[topoinfo->numG*i + j]/tmprlt->paras[topoinfo->ParasPos[h]];
                    if (tmp >= 1) {
                        localnumover[j] = localnumover[j] + 1;
                    }
                    else {
                        localnumdown[j] = localnumdown[j] + 1;
                    }
                }
            }
        }
    }
    /* one row per model: index, then (over, down) pairs per gene */
    fprintf(f_test, "%d", num);
    for (j = 0; j < topoinfo->numG; j++) {
        fprintf(f_test, "\t%d\t%d", localnumover[j], localnumdown[j]);
        tmprlt->numover[j] = tmprlt->numover[j] + localnumover[j];
        tmprlt->numdown[j] = tmprlt->numdown[j] + localnumdown[j];
    }
    fprintf(f_test, "\n");
    free(localnumover);
    free(localnumdown);
    if (simu_opts->num_paras == num) {
        fclose(f_test);
        f_test = NULL;
    }
}
/* Free every heap block owned by the three simulation structs.  The struct
 * objects themselves are caller-owned and are not freed here.  The free order
 * differs from the allocation order; all blocks are independent. */
void release_memory(struct opts *simu_opts, struct topo *topoinfo, struct rlt *tmprlt)
{
    /* result buffers */
    free(tmprlt->numover);
    free(tmprlt->numdown);
    free(tmprlt->cnt_store);
    free(tmprlt->y_store);
    free(tmprlt->soln);
    free(tmprlt->paras);
    /* perturbation (KD/OE/DE) lists */
    free(simu_opts->KDID);
    free(simu_opts->OEID);
    free(simu_opts->OEFD);
    free(simu_opts->DEID);
    free(simu_opts->DEFD);
    /* circuit topology */
    free(topoinfo->SourceG);
    free(topoinfo->TargetG);
    free(topoinfo->TypeR);
    free(topoinfo->ParasPos);
    free(topoinfo->modelname);
    if (simu_opts->exts == 0) {
        /* gene names exist only in non-external mode -- presumably mirrors the
         * allocation path (TODO confirm against the loader) */
        for (int g = 0; g < topoinfo->numG; g++) {
            free(topoinfo->Gname[g]);
        }
        free(topoinfo->Gname);
    }
    /* parameter sampling ranges: three parallel rows plus the row table */
    free(topoinfo->prsrandrange[0]);
    free(topoinfo->prsrandrange[1]);
    free(topoinfo->prsrandrange[2]);
    free(topoinfo->prsrandrange);
}
|
potential.h | // Copyright (c) 2013-2016 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file potential.h
*
* \brief Contains declaration and partial implementation of sirius::Potential class.
*/
#ifndef __POTENTIAL_H__
#define __POTENTIAL_H__
#include "periodic_function.h"
#include "spheric_function.h"
#include "simulation_context.h"
#include "density.h"
namespace sirius {
/// Generate effective potential from charge density and magnetization.
/** \note At some point we need to update the atomic potential with the new MT potential. This is simple if the
effective potential is a global function. Otherwise we need to pass the effective potential between MPI ranks.
This is also simple, but requires some time. It is also easier to mix the global functions. */
class Potential
{
private:
Simulation_context& ctx_;
Unit_cell& unit_cell_;
Communicator const& comm_;
std::unique_ptr<Periodic_function<double>> effective_potential_;
Periodic_function<double>* effective_magnetic_field_[3];
Periodic_function<double>* hartree_potential_;
Periodic_function<double>* xc_potential_;
Periodic_function<double>* xc_energy_density_;
/// Local part of pseudopotential.
std::unique_ptr<Periodic_function<double>> local_potential_;
mdarray<double, 3> sbessel_mom_;
mdarray<double, 3> sbessel_mt_;
mdarray<double, 2> gamma_factors_R_;
int lmax_;
std::unique_ptr<SHT> sht_;
int pseudo_density_order_{9};
std::vector<double_complex> zil_;
std::vector<double_complex> zilm_;
std::vector<int> l_by_lm_;
mdarray<double_complex, 2> gvec_ylm_;
double energy_vha_;
/// Electronic part of Hartree potential.
/** Used to compute electron-nuclear contribution to the total energy */
mdarray<double, 1> vh_el_;
std::unique_ptr<Mixer<double>> mixer_{nullptr};
std::vector<XC_functional> xc_func_;
/// Plane-wave coefficients of the effective potential weighted by the unit step-function.
mdarray<double_complex, 1> veff_pw_;
/// Plane-wave coefficients of the inverse relativistic mass weighted by the unit step-function.
mdarray<double_complex, 1> rm_inv_pw_;
/// Plane-wave coefficients of the squared inverse relativistic mass weighted by the unit step-function.
mdarray<double_complex, 1> rm2_inv_pw_;
struct paw_potential_data_t
{
Atom *atom_{nullptr};
int ia{-1};
int ia_paw{-1};
mdarray<double, 3> ae_potential_; // TODO: -> Spheric_function
mdarray<double, 3> ps_potential_;
double hartree_energy_{0.0};
double xc_energy_{0.0};
double core_energy_{0.0};
double one_elec_energy_{0.0};
};
std::vector<double> paw_hartree_energies_;
std::vector<double> paw_xc_energies_;
std::vector<double> paw_core_energies_;
std::vector<double> paw_one_elec_energies_;
double paw_hartree_total_energy_{0.0};
double paw_xc_total_energy_{0.0};
double paw_total_core_energy_{0.0};
double paw_one_elec_energy_{0.0};
std::vector<paw_potential_data_t> paw_potential_data_;
mdarray<double_complex, 4> paw_dij_;
int max_paw_basis_size_{0};
void init_PAW();
double xc_mt_PAW_nonmagnetic(Radial_grid const& rgrid,
mdarray<double, 3>& out_atom_pot,
mdarray<double, 2> const& full_rho_lm,
std::vector<double> const& rho_core);
double xc_mt_PAW_collinear(Radial_grid const& rgrid,
mdarray<double,3> &out_atom_pot,
mdarray<double,2> const& full_rho_lm,
mdarray<double,3> const& magnetization_lm,
std::vector<double> const& rho_core);
// TODO DO
void xc_mt_PAW_noncollinear( ) { };
void calc_PAW_local_potential(paw_potential_data_t &pdd,
mdarray<double, 2> const& ae_full_density,
mdarray<double, 2> const& ps_full_density,
mdarray<double, 3> const& ae_local_magnetization,
mdarray<double, 3> const& ps_local_magnetization);
void calc_PAW_local_Dij(paw_potential_data_t &pdd, mdarray<double_complex, 4>& paw_dij);
double calc_PAW_hartree_potential(Atom& atom, const Radial_grid& grid,
mdarray<double, 2> const& full_density,
mdarray<double, 3>& out_atom_pot);
double calc_PAW_one_elec_energy(paw_potential_data_t &pdd,
const mdarray<double_complex, 4>& density_matrix,
const mdarray<double_complex, 4>& paw_dij);
void add_paw_Dij_to_atom_Dmtrx();
/// Solve the Poisson equation inside the muffin-tin sphere of a single atom.
/** For every (l,m) channel of the spectral-domain charge density the radial
 *  Green's-function solution is integrated on the atom's radial grid; for
 *  free_atom == false the surface term fixing V at the sphere boundary R and
 *  the constant part of the nuclear potential -z*(1/r - 1/R) are added.
 *
 *  \param [in]  atom__   Atom whose muffin-tin problem is solved.
 *  \param [in]  rho_mt__ MT charge density (spectral domain).
 *  \param [out] vha_mt__ Resulting MT Hartree potential (spectral domain).
 *  \return Multipole moments q_{lm} of the MT charge; the nuclear monopole
 *          -Z*y00 is folded into the lm = 0 entry. */
template <bool free_atom>
inline std::vector<double>
poisson_vmt(Atom const& atom__,
            Spheric_function<function_domain_t::spectral, double> const& rho_mt__,
            Spheric_function<function_domain_t::spectral, double>& vha_mt__) const
{
    int lmmax_rho = rho_mt__.angular_domain_size();
    int lmmax_pot = vha_mt__.angular_domain_size();
    assert((int)l_by_lm_.size() >= lmmax_rho);
    if (lmmax_rho > ctx_.lmmax_rho()) {
        std::stringstream s;
        s << "wrong angular size of rho_mt for atom of " << atom__.type().symbol() << std::endl
          << " lmmax_rho: " << lmmax_rho << std::endl
          << " ctx.lmmax_rho(): " << ctx_.lmmax_rho();
        TERMINATE(s);
    }
    std::vector<double> qmt(ctx_.lmmax_rho(), 0);
    double R = atom__.mt_radius();
    int nmtp = atom__.num_mt_points();
    #pragma omp parallel
    {
        /* per-thread scratch for the two radial integrals */
        std::vector<double> g1;
        std::vector<double> g2;
        #pragma omp for
        for (int lm = 0; lm < lmmax_rho; lm++) {
            int l = l_by_lm_[lm];
            auto rholm = rho_mt__.component(lm);
            /* save multipole moment */
            qmt[lm] = rholm.integrate(g1, l + 2);
            if (lm < lmmax_pot) {
                rholm.integrate(g2, 1 - l);
                double d2 = fourpi / double(2 * l + 1);
                double vlm;
                for (int ir = 0; ir < nmtp; ir++) {
                    double r = atom__.radial_grid(ir);
                    if (free_atom) {
                        vlm = g1[ir] / std::pow(r, l + 1) + (g2.back() - g2[ir]) * std::pow(r, l);
                    } else {
                        /* boundary-corrected solution: vanishes at r = R */
                        double d1 = 1.0 / std::pow(R, 2 * l + 1);
                        vlm = (1.0 - std::pow(r / R, 2 * l + 1)) * g1[ir] / std::pow(r, l + 1) +
                              (g2.back() - g2[ir]) * std::pow(r, l) - (g1.back() - g1[ir]) * std::pow(r, l) * d1;
                    }
                    vha_mt__(lm, ir) = vlm * d2;
                }
            }
        }
    }
    if (!free_atom) {
        /* constant part of nuclear potential -z*(1/r - 1/R) */
        for (int ir = 0; ir < nmtp; ir++) {
            vha_mt__(0, ir) += atom__.zn() / R / y00;
        }
    }
    /* nuclear multipole moment */
    qmt[0] -= atom__.zn() * y00;
    /* bug fix: return by name to enable NRVO -- "return std::move(local)" is a
     * pessimization (C++ Core Guidelines F.48) */
    return qmt;
}
/// Compute MT part of the potential and MT multipole moments.
/** Solves the single-atom Poisson problem for each atom in this rank's local
 *  slice of the atom distribution, converts the resulting real multipole
 *  moments to the complex representation, and allreduces them so every rank
 *  holds the full qmt__ array. */
inline void poisson_vmt(Periodic_function<double>* rho__,
                        Periodic_function<double>* vh__,
                        mdarray<double_complex, 2>& qmt__)
{
    PROFILE("sirius::Potential::poisson_vmt");
    qmt__.zero();
    /* each MPI rank handles only its local portion of the split atom index */
    for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) {
        int ia = unit_cell_.spl_num_atoms(ialoc);
        /* NOTE(review): const_cast drops constness of the f_mt reference --
         * presumably vh__->f_mt(ialoc) only has a const accessor here; confirm */
        auto qmt = poisson_vmt<false>(unit_cell_.atom(ia), rho__->f_mt(ialoc),
                                      const_cast<Spheric_function<function_domain_t::spectral, double>&>(vh__->f_mt(ialoc)));
        /* store this atom's moments (converted to complex Ylm) in column ia */
        SHT::convert(ctx_.lmax_rho(), &qmt[0], &qmt__(0, ia));
    }
    /* combine the per-rank contributions: ranks computed disjoint columns */
    ctx_.comm().allreduce(&qmt__(0, 0), (int)qmt__.size());
}
/// Perform a G-vector summation of plane-wave coefficiens multiplied by radial integrals.
inline void poisson_sum_G(int lmmax__,
double_complex* fpw__,
mdarray<double, 3>& fl__,
mdarray<double_complex, 2>& flm__);
/// Add contribution from the pseudocharge to the plane-wave expansion
inline void poisson_add_pseudo_pw(mdarray<double_complex, 2>& qmt, mdarray<double_complex, 2>& qit, double_complex* rho_pw);
/// Generate local part of pseudo potential.
/** Total local potential is a lattice sum:
* \f[
* V({\bf r}) = \sum_{{\bf T},\alpha} V_{\alpha}({\bf r} - {\bf T} - {\bf \tau}_{\alpha})
* \f]
* We want to compute its plane-wave expansion coefficients:
* \f[
* V({\bf G}) = \frac{1}{V} \int e^{-i{\bf Gr}} V({\bf r}) d{\bf r} =
* \frac{1}{V} \sum_{{\bf T},\alpha} \int e^{-i{\bf Gr}}V_{\alpha}({\bf r} - {\bf T} - {\bf \tau}_{\alpha})d{\bf r}
* \f]
* Standard change of variables: \f$ {\bf r}' = {\bf r} - {\bf T} - {\bf \tau}_{\alpha},\; {\bf r} = {\bf r}' + {\bf T} + {\bf \tau}_{\alpha} \f$
* leads to:
* \f[
* V({\bf G}) = \frac{1}{V} \sum_{{\bf T},\alpha} \int e^{-i{\bf G}({\bf r}' + {\bf T} + {\bf \tau}_{\alpha})}V_{\alpha}({\bf r}')d{\bf r'} =
* \frac{N}{V} \sum_{\alpha} \int e^{-i{\bf G}({\bf r}' + {\bf \tau}_{\alpha})}V_{\alpha}({\bf r}')d{\bf r'} =
* \frac{1}{\Omega} \sum_{\alpha} e^{-i {\bf G} {\bf \tau}_{\alpha} } \int e^{-i{\bf G}{\bf r}}V_{\alpha}({\bf r})d{\bf r}
* \f]
* Using the well-known expansion of a plane wave in terms of spherical Bessel functions:
* \f[
* e^{i{\bf G}{\bf r}}=4\pi \sum_{\ell m} i^\ell j_{\ell}(Gr)Y_{\ell m}^{*}({\bf \hat G})Y_{\ell m}({\bf \hat r})
* \f]
* and remembering that for \f$ \ell = 0 \f$ (potential is spherical) \f$ j_{0}(x) = \sin(x) / x \f$ we have:
* \f[
* V_{\alpha}({\bf G}) = \int V_{\alpha}(r) 4\pi \frac{\sin(Gr)}{Gr} Y^{*}_{00} Y_{00} r^2 \sin(\theta) dr d \phi d\theta =
* 4\pi \int V_{\alpha}(r) \frac{\sin(Gr)}{Gr} r^2 dr
* \f]
* The tricky part comes next: \f$ V_{\alpha}({\bf r}) \f$ is a long-range potential -- it decays slowly as
* \f$ -Z_{\alpha}^{p}/r \f$ and the straightforward integration with a spherical Bessel function is numerically
* unstable. For \f$ {\bf G} = 0 \f$ an extra term \f$ Z_{\alpha}^p/r \f$, corresponding to the potential of
* pseudo-ion, is added to and removed from the local part of the atomic pseudopotential \f$ V_{\alpha}({\bf r}) \f$:
* \f[
* V_{\alpha}({\bf G} = 0) = \int V_{\alpha}({\bf r})d{\bf r} \Rightarrow
* 4\pi \int \Big( V_{\alpha}(r) + \frac{Z_{\alpha}^p}{r} \Big) r^2 dr -
* 4\pi \int \Big( \frac{Z_{\alpha}^p}{r} \Big) r^2 dr
* \f]
* Second term corresponds to the average electrostatic potential of ions and it is ignored
* (like the \f$ {\bf G} = 0 \f$ term in the Hartree potential of electrons).
* For \f$ G \ne 0 \f$ the following trick is done: \f$ Z_{\alpha}^p {\rm erf}(r) / r \f$ is added to and
* removed from \f$ V_{\alpha}(r) \f$. The idea is to make potential decay quickly and then take the extra
* contribution analytically. We have:
* \f[
* V_{\alpha}({\bf G}) = 4\pi \int \Big(V_{\alpha}(r) + Z_{\alpha}^p \frac{{\rm erf}(r)} {r} -
* Z_{\alpha}^p \frac{{\rm erf}(r)}{r}\Big) \frac{\sin(Gr)}{Gr} r^2 dr
* \f]
* Analytical contribution from the error function is computed using the 1D Fourier transform in complex plane:
* \f[
* \frac{1}{\sqrt{2 \pi}} \int_{-\infty}^{\infty} {\rm erf}(t) e^{i\omega t} dt =
* \frac{i e^{-\frac{\omega ^2}{4}} \sqrt{\frac{2}{\pi }}}{\omega }
* \f]
* from which we immediately get
* \f[
* \int_{0}^{\infty} \frac{{\rm erf}(r)}{r} \frac{\sin(Gr)}{Gr} r^2 dr = \frac{e^{-\frac{G^2}{4}}}{G^2}
* \f]
* The final expression for the local potential radial integrals for \f$ G \ne 0 \f$ take the following form:
* \f[
* 4\pi \int \Big(V_{\alpha}(r) r + Z_{\alpha}^p {\rm erf}(r) \Big) \frac{\sin(Gr)}{G} dr - \frac{e^{-\frac{G^2}{4}}}{G^2}
* \f]
*/
/// Generate the local part of the pseudopotential on the real-space grid.
/** Builds the plane-wave coefficients as a lattice sum of the per-atom-type
 *  radial integrals (see the derivation in the comment above), then transforms
 *  them to the real-space grid of the fine FFT. Result is stored in
 *  local_potential_. Called once from the constructor for the
 *  pseudopotential case. */
inline void generate_local_potential()
{
    PROFILE("sirius::Potential::generate_local_potential");
    /* plane-wave coefficients V(G), one radial integral per (atom type, |G|) */
    auto v = unit_cell_.make_periodic_function([this](int iat, double g)
    {
        return ctx_.radial_integrals().vloc_radial_integral(iat, g);
    },
    ctx_.gvec());
    /* backward transform G -> r on the FFT-distributed slab of coefficients */
    ctx_.fft().transform<1>(ctx_.gvec().partition(), &v[ctx_.gvec().partition().gvec_offset_fft()]);
    ctx_.fft().output(&local_potential_->f_rg(0));
    //if (ctx_.control().print_checksum_) {
    // auto cs = local_potential_->checksum_pw();
    // auto cs1 = local_potential_->checksum_rg();
    // if (ctx_.comm().rank() == 0) {
    // DUMP("checksum(local_potential_pw): %18.10f %18.10f", cs.real(), cs.imag());
    // DUMP("checksum(local_potential_rg): %18.10f", cs1);
    // }
    //}
}
inline void xc_mt_nonmagnetic(Radial_grid const& rgrid,
std::vector<XC_functional>& xc_func,
Spheric_function<spectral, double> const& rho_lm,
Spheric_function<spatial, double>& rho_tp,
Spheric_function<spatial, double>& vxc_tp,
Spheric_function<spatial, double>& exc_tp);
inline void xc_mt_magnetic(Radial_grid const& rgrid,
std::vector<XC_functional>& xc_func,
Spheric_function<spectral, double>& rho_up_lm,
Spheric_function<spatial, double>& rho_up_tp,
Spheric_function<spectral, double>& rho_dn_lm,
Spheric_function<spatial, double>& rho_dn_tp,
Spheric_function<spatial, double>& vxc_up_tp,
Spheric_function<spatial, double>& vxc_dn_tp,
Spheric_function<spatial, double>& exc_tp);
inline void xc_mt(Periodic_function<double>* rho,
Periodic_function<double>* magnetization[3],
std::vector<XC_functional>& xc_func,
Periodic_function<double>* vxc,
Periodic_function<double>* bxc[3],
Periodic_function<double>* exc);
inline void xc_it_nonmagnetic(Periodic_function<double>* rho,
std::vector<XC_functional>& xc_func,
Periodic_function<double>* vxc,
Periodic_function<double>* exc);
inline void xc_it_magnetic(Periodic_function<double>* rho,
Periodic_function<double>* magnetization[3],
std::vector<XC_functional>& xc_func,
Periodic_function<double>* vxc,
Periodic_function<double>* bxc[3],
Periodic_function<double>* exc);
inline void init();
public:
/// Constructor
/** Allocates the effective potential, Hartree/XC components and (for the
 *  pseudopotential case) the local ionic potential; precomputes i^l factors
 *  and, in the full-potential case, Ylm(G) and the relativistic PW arrays. */
Potential(Simulation_context& ctx__)
    : ctx_(ctx__)
    , unit_cell_(ctx__.unit_cell())
    , comm_(ctx__.comm())
{
    PROFILE("sirius::Potential::Potential");
    /* angular expansion must accommodate both density and potential */
    lmax_ = std::max(ctx_.lmax_rho(), ctx_.lmax_pot());
    sht_ = std::unique_ptr<SHT>(new SHT(lmax_));
    if (lmax_ >= 0) {
        l_by_lm_ = Utils::l_by_lm(lmax_);
        /* precompute i^l */
        zil_.resize(lmax_ + 1);
        for (int l = 0; l <= lmax_; l++) {
            zil_[l] = std::pow(double_complex(0, 1), l);
        }
        /* i^l indexed by the composite lm index */
        zilm_.resize(Utils::lmmax(lmax_));
        for (int l = 0, lm = 0; l <= lmax_; l++) {
            for (int m = -l; m <= l; m++, lm++) {
                zilm_[lm] = zil_[l];
            }
        }
    }
    effective_potential_ = std::unique_ptr<Periodic_function<double>>(new Periodic_function<double>(ctx_, ctx_.lmmax_pot(), 1));
    int need_gvec{1};
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        effective_magnetic_field_[j] = new Periodic_function<double>(ctx_, ctx_.lmmax_pot(), need_gvec);
    }
    hartree_potential_ = new Periodic_function<double>(ctx_, ctx_.lmmax_pot(), 1);
    hartree_potential_->allocate_mt(false);
    xc_potential_ = new Periodic_function<double>(ctx_, ctx_.lmmax_pot(), 1);
    xc_potential_->allocate_mt(false);
    xc_energy_density_ = new Periodic_function<double>(ctx_, ctx_.lmmax_pot(), 0);
    xc_energy_density_->allocate_mt(false);
    if (!ctx_.full_potential()) {
        local_potential_ = std::unique_ptr<Periodic_function<double>>(new Periodic_function<double>(ctx_, 0, 0));
        local_potential_->zero();
        generate_local_potential();
    }
    vh_el_ = mdarray<double, 1>(unit_cell_.num_atoms());
    if (ctx_.full_potential()) {
        /* Ylm(G) for the locally stored fraction of G-vectors */
        gvec_ylm_ = mdarray<double_complex, 2>(ctx_.lmmax_pot(), ctx_.gvec().gvec_count(comm_.rank()));
        for (int igloc = 0; igloc < ctx_.gvec().gvec_count(comm_.rank()); igloc++) {
            int ig = ctx_.gvec().gvec_offset(comm_.rank()) + igloc;
            auto rtp = SHT::spherical_coordinates(ctx_.gvec().gvec_cart(ig));
            SHT::spherical_harmonics(ctx_.lmax_pot(), rtp[1], rtp[2], &gvec_ylm_(0, igloc));
        }
    }
    if (ctx_.full_potential()) {
        /* intentional cascade: iora allocates rm2_inv, rm_inv and veff arrays;
         * zora allocates rm_inv and veff; other cases only veff */
        switch (ctx_.valence_relativity()) {
            case relativity_t::iora: {
                rm2_inv_pw_ = mdarray<double_complex, 1>(ctx_.gvec().num_gvec());
            }
            /* fall through */
            case relativity_t::zora: {
                rm_inv_pw_ = mdarray<double_complex, 1>(ctx_.gvec().num_gvec());
            }
            /* fall through */
            default: {
                veff_pw_ = mdarray<double_complex, 1>(ctx_.gvec().num_gvec());
            }
        }
    }
    init();
    /* create list of XC functionals; emplace_back constructs in place and
     * avoids the redundant std::move of a temporary the old code had */
    for (auto& xc_label: ctx_.xc_functionals()) {
        xc_func_.emplace_back(xc_label, ctx_.num_spins());
    }
    /* in case of PAW */
    init_PAW();
}
/// Destructor: releases the raw-pointer-owned components.
/* NOTE(review): hartree_potential_, xc_potential_, xc_energy_density_ and the
 * magnetic-field components are held as raw pointers while effective_potential_
 * and local_potential_ are unique_ptr — consider unifying ownership. */
~Potential()
{
    /* one field component per magnetic dimension was allocated in the constructor */
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        delete effective_magnetic_field_[j];
    }
    delete hartree_potential_;
    delete xc_potential_;
    delete xc_energy_density_;
}
/// Attach externally owned storage for the effective potential.
/** \param veffmt muffin-tin part; used only in the full-potential case, may be nullptr
 *  \param veffit interstitial (regular-grid) part; may be nullptr */
inline void set_effective_potential_ptr(double* veffmt, double* veffit)
{
    bool const attach_mt = ctx_.full_potential() && (veffmt != nullptr);
    if (attach_mt) {
        effective_potential_->set_mt_ptr(veffmt);
    }
    if (veffit != nullptr) {
        effective_potential_->set_rg_ptr(veffit);
    }
}
/// Attach externally owned storage for the magnetic-field components.
/** \param beffmt  muffin-tin parts laid out as (lm, r, atom, dim); may be nullptr
 *  \param beffit  interstitial parts laid out as (grid point, dim); may be nullptr
 *  No-op when the calculation is non-magnetic. */
inline void set_effective_magnetic_field_ptr(double* beffmt, double* beffit)
{
    if (ctx_.num_mag_dims() == 0) {
        return;
    }
    assert(ctx_.num_spins() == 2);
    /* set temporary array wrapper */
    mdarray<double, 4> beffmt_tmp(beffmt, ctx_.lmmax_pot(), unit_cell_.max_num_mt_points(),
    unit_cell_.num_atoms(), ctx_.num_mag_dims());
    mdarray<double, 2> beffit_tmp(beffit, ctx_.fft().size(), ctx_.num_mag_dims());
    if (ctx_.num_mag_dims() == 1) {
        /* collinear case: only the z-component exists */
        if (beffmt) {
            effective_magnetic_field_[0]->set_mt_ptr(&beffmt_tmp(0, 0, 0, 0));
        }
        if (beffit) {
            effective_magnetic_field_[0]->set_rg_ptr(&beffit_tmp(0, 0));
        }
    }
    if (ctx_.num_mag_dims() == 3) {
        /* non-collinear case: external layout is (x, y, z) while the internal
         * component order is (z, x, y) — hence the index permutation below */
        if (beffmt) {
            /* z-component */
            effective_magnetic_field_[0]->set_mt_ptr(&beffmt_tmp(0, 0, 0, 2));
            /* x-component */
            effective_magnetic_field_[1]->set_mt_ptr(&beffmt_tmp(0, 0, 0, 0));
            /* y-component */
            effective_magnetic_field_[2]->set_mt_ptr(&beffmt_tmp(0, 0, 0, 1));
        }
        if (beffit) {
            /* z-component */
            effective_magnetic_field_[0]->set_rg_ptr(&beffit_tmp(0, 2));
            /* x-component */
            effective_magnetic_field_[1]->set_rg_ptr(&beffit_tmp(0, 0));
            /* y-component */
            effective_magnetic_field_[2]->set_rg_ptr(&beffit_tmp(0, 1));
        }
    }
}
/// Zero the effective potential and every magnetic-field component.
inline void zero()
{
    effective_potential_->zero();
    for (int imag = 0; imag < ctx_.num_mag_dims(); imag++) {
        effective_magnetic_field_[imag]->zero();
    }
}
/// Poisson solver.
/** Detailed explanation is available in:
* - Weinert, M. (1981). Solution of Poisson's equation: beyond Ewald-type methods.
* Journal of Mathematical Physics, 22(11), 2433–2439. doi:10.1063/1.524800
* - Classical Electrodynamics Third Edition by J. D. Jackson.
*
* Solution of Poisson's equation for the muffin-tin geometry is carried out in several steps:
* - True multipole moments \f$ q_{\ell m}^{\alpha} \f$ of the muffin-tin charge density are computed.
* - Pseudocharge density is introduced. Pseudocharge density coincides with the true charge density
* in the interstitial region and its multipole moments inside muffin-tin spheres coincide with the
* true multipole moments.
* - Poisson's equation for the pseudocharge density is solved in the plane-wave domain. It gives the
* correct interstitial potential and correct muffin-tin boundary values.
* - Finally, muffin-tin part of potential is found by solving Poisson's equation in spherical coordinates
* with Dirichlet boundary conditions.
*
* We start by computing true multipole moments of the charge density inside the muffin-tin spheres:
* \f[
* q_{\ell m}^{\alpha} = \int Y_{\ell m}^{*}(\hat {\bf r}) r^{\ell} \rho({\bf r}) d {\bf r} =
* \int \rho_{\ell m}^{\alpha}(r) r^{\ell + 2} dr
* \f]
* and for the nucleus with charge density \f$ \rho(r, \theta, \phi) = -\frac{Z \delta(r)}{4 \pi r^2} \f$:
* \f[
* q_{00}^{\alpha} = \int Y_{0 0} \frac{-Z_{\alpha} \delta(r)}{4 \pi r^2} r^2 \sin \theta dr d\phi d\theta =
* -Z_{\alpha} Y_{00}
* \f]
*
* Now we need to get the multipole moments of the interstitial charge density \f$ \rho^{I}({\bf r}) \f$ inside
* muffin-tin spheres. We need this in order to estimate the amount of pseudocharge to be added to
* \f$ \rho^{I}({\bf r}) \f$ to get the pseudocharge multipole moments equal to the true multipole moments.
* We want to compute
* \f[
* q_{\ell m}^{I,\alpha} = \int Y_{\ell m}^{*}(\hat {\bf r}) r^{\ell} \rho^{I}({\bf r}) d {\bf r}
* \f]
* where
* \f[
* \rho^{I}({\bf r}) = \sum_{\bf G}e^{i{\bf Gr}} \rho({\bf G})
* \f]
*
* Recall the spherical plane wave expansion:
* \f[
* e^{i{\bf G r}}=4\pi e^{i{\bf G r}_{\alpha}} \sum_{\ell m} i^\ell
* j_{\ell}(G|{\bf r}-{\bf r}_{\alpha}|)
* Y_{\ell m}^{*}({\bf \hat G}) Y_{\ell m}(\widehat{{\bf r}-{\bf r}_{\alpha}})
* \f]
* Multipole moments of each plane-wave are computed as:
* \f[
* q_{\ell m}^{\alpha}({\bf G}) = 4 \pi e^{i{\bf G r}_{\alpha}} Y_{\ell m}^{*}({\bf \hat G}) i^{\ell}
* \int_{0}^{R} j_{\ell}(Gr) r^{\ell + 2} dr = 4 \pi e^{i{\bf G r}_{\alpha}} Y_{\ell m}^{*}({\bf \hat G}) i^{\ell}
* \left\{\begin{array}{ll} \frac{R^{\ell + 2} j_{\ell + 1}(GR)}{G} & G \ne 0 \\
* \frac{R^3}{3} \delta_{\ell 0} & G = 0 \end{array} \right.
* \f]
*
* Final expression for the muffin-tin multipole moments of the interstitial charge density:
* \f[
* q_{\ell m}^{I,\alpha} = \sum_{\bf G}\rho({\bf G}) q_{\ell m}^{\alpha}({\bf G})
* \f]
*
* Now we are going to modify interstitial charge density inside the muffin-tin region in order to
* get the true multipole moments. We will add a pseudodensity of the form:
* \f[
* P({\bf r}) = \sum_{\ell m} p_{\ell m}^{\alpha} Y_{\ell m}(\hat {\bf r}) r^{\ell} \left(1-\frac{r^2}{R^2}\right)^n
* \f]
* Radial functions of the pseudodensity are chosen in a special way. First, they produce a confined and
* smooth functions inside muffin-tins and second (most important) plane-wave coefficients of the
* pseudodensity can be computed analytically. Let's find the relation between \f$ p_{\ell m}^{\alpha} \f$
* coefficients and true and interstitial multipole moments first. We are searching for the pseudodensity which restores
* the true multipole moments:
* \f[
* \int Y_{\ell m}^{*}(\hat {\bf r}) r^{\ell} \Big(\rho^{I}({\bf r}) + P({\bf r})\Big) d {\bf r} = q_{\ell m}^{\alpha}
* \f]
* Then
* \f[
* p_{\ell m}^{\alpha} = \frac{q_{\ell m}^{\alpha} - q_{\ell m}^{I,\alpha}}
* {\int r^{2 \ell + 2} \left(1-\frac{r^2}{R^2}\right)^n dr} =
* (q_{\ell m}^{\alpha} - q_{\ell m}^{I,\alpha}) \frac{2 \Gamma(5/2 + \ell + n)}{R^{2\ell + 3}\Gamma(3/2 + \ell) \Gamma(n + 1)}
* \f]
*
* Now let's find the plane-wave coefficients of \f$ P({\bf r}) \f$ inside each muffin-tin:
* \f[
* P^{\alpha}({\bf G}) = \frac{4\pi e^{-i{\bf G r}_{\alpha}}}{\Omega} \sum_{\ell m} (-i)^{\ell} Y_{\ell m}({\bf \hat G})
* p_{\ell m}^{\alpha} \int_{0}^{R} j_{\ell}(G r) r^{\ell} \left(1-\frac{r^2}{R^2}\right)^n r^2 dr
* \f]
*
* Integral of the spherical Bessel function with the radial pseudodensity component is taken analytically:
* \f[
* \int_{0}^{R} j_{\ell}(G r) r^{\ell} \left(1-\frac{r^2}{R^2}\right)^n r^2 dr =
* 2^n R^{\ell + 3} (GR)^{-n - 1} \Gamma(n + 1) j_{n + \ell + 1}(GR)
* \f]
*
* The final expression for the pseudodensity plane-wave component is:
* \f[
* P^{\alpha}({\bf G}) = \frac{4\pi e^{-i{\bf G r}_{\alpha}}}{\Omega} \sum_{\ell m} (-i)^{\ell} Y_{\ell m}({\bf \hat G})
* (q_{\ell m}^{\alpha} - q_{\ell m}^{I,\alpha}) \Big( \frac{2}{GR} \Big)^{n+1}
* \frac{ \Gamma(5/2 + n + \ell) } {R^{\ell} \Gamma(3/2+\ell)}
* \f]
*
* For \f$ G=0 \f$ only \f$ \ell = 0 \f$ contribution survives:
* \f[
* P^{\alpha}({\bf G}=0) = \frac{4\pi}{\Omega} Y_{00} (q_{00}^{\alpha} - q_{00}^{I,\alpha})
* \f]
*
* We can now sum the contributions from all muffin-tin spheres and obtain a modified charge density,
* which is equal to the exact charge density in the interstitial region and which has correct multipole
* moments inside muffin-tin spheres:
* \f[
* \tilde \rho({\bf G}) = \rho({\bf G}) + \sum_{\alpha} P^{\alpha}({\bf G})
* \f]
* This density is used to solve the Poisson's equation in the plane-wave domain:
* \f[
* V_{H}({\bf G}) = \frac{4 \pi \tilde \rho({\bf G})}{G^2}
* \f]
* The potential is correct in the interstitial region and also on the muffin-tin surface. We will use
* it to find the boundary conditions for the potential inside the muffin-tins. Using spherical
* plane-wave expansion we get:
* \f[
* V^{\alpha}_{\ell m}(R) = \sum_{\bf G} V_{H}({\bf G})
* 4\pi e^{i{\bf G r}_{\alpha}} i^\ell
* j_{\ell}^{\alpha}(GR) Y_{\ell m}^{*}({\bf \hat G})
* \f]
*
* As soon as the muffin-tin boundary conditions for the potential are known, we can find the potential
* inside spheres using Dirichlet Green's function:
* \f[
* V({\bf x}) = \int \rho({\bf x'})G_D({\bf x},{\bf x'}) d{\bf x'} - \frac{1}{4 \pi} \int_{S} V({\bf x'})
* \frac{\partial G_D}{\partial n'} d{\bf S'}
* \f]
* where Dirichlet Green's function for the sphere is defined as:
* \f[
* G_D({\bf x},{\bf x'}) = 4\pi \sum_{\ell m} \frac{Y_{\ell m}^{*}({\bf \hat x'})
* Y_{\ell m}(\hat {\bf x})}{2\ell + 1}
* \frac{r_{<}^{\ell}}{r_{>}^{\ell+1}}\Biggl(1 - \Big( \frac{r_{>}}{R} \Big)^{2\ell + 1} \Biggr)
* \f]
* and its normal derivative at the surface is equal to:
* \f[
* \frac{\partial G_D}{\partial n'} = -\frac{4 \pi}{R^2} \sum_{\ell m} \Big( \frac{r}{R} \Big)^{\ell}
* Y_{\ell m}^{*}({\bf \hat x'}) Y_{\ell m}(\hat {\bf x})
* \f]
*/
inline void poisson(Periodic_function<double>* rho, Periodic_function<double>* vh);
/// Generate XC potential and energy density
/** In case of spin-unpolarized GGA the XC potential has the following expression:
* \f[
* V_{XC}({\bf r}) = \frac{\partial}{\partial \rho} \varepsilon_{xc}(\rho, \nabla \rho) -
* \nabla \frac{\partial}{\partial (\nabla \rho)} \varepsilon_{xc}(\rho, \nabla \rho)
* \f]
* LibXC packs the gradient information into the so-called \a sigma array:
* \f[
* \sigma = \nabla \rho \nabla \rho
* \f]
* Changing variables in \f$ V_{XC} \f$ expression gives:
* \f{eqnarray*}{
* V_{XC}({\bf r}) &=& \frac{\partial}{\partial \rho} \varepsilon_{xc}(\rho, \sigma) -
* \nabla \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma}
* \frac{\partial \sigma}{ \partial (\nabla \rho)} \\
* &=& \frac{\partial}{\partial \rho} \varepsilon_{xc}(\rho, \sigma) -
* 2 \nabla \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma} \nabla \rho -
* 2 \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma} \nabla^2 \rho
* \f}
* The following sequence of functions must be computed:
* - density on the real space grid
* - gradient of density (in spectral representation)
* - gradient of density on the real space grid
* - laplacian of density (in spectral representation)
* - laplacian of density on the real space grid
* - \a sigma array
* - a call to Libxc must be performed \a sigma derivatives must be obtained
* - \f$ \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma} \f$ in spectral representation
* - gradient of \f$ \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma} \f$ in spectral representation
* - gradient of \f$ \frac{\partial \varepsilon_{xc}(\rho, \sigma)}{\partial \sigma} \f$ on the real space grid
*
* Expression for spin-polarized potential has a bit more complicated form:
* \f{eqnarray*}
* V_{XC}^{\gamma} &=& \frac{\partial \varepsilon_{xc}}{\partial \rho_{\gamma}} - \nabla
* \Big( 2 \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \gamma}} \nabla \rho_{\gamma} +
* \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \delta}} \nabla \rho_{\delta} \Big) \\
* &=& \frac{\partial \varepsilon_{xc}}{\partial \rho_{\gamma}}
* -2 \nabla \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \gamma}} \nabla \rho_{\gamma}
* -2 \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \gamma}} \nabla^2 \rho_{\gamma}
* - \nabla \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \delta}} \nabla \rho_{\delta}
* - \frac{\partial \varepsilon_{xc}}{\partial \sigma_{\gamma \delta}} \nabla^2 \rho_{\delta}
* \f}
* In magnetic case the "up" and "dn" density and potential decomposition is used. Using the fact that the
* effective magnetic field is parallel to magnetization at each point in space, we can write the coupling
* of density and magnetization with XC potential and XC magnetic field as:
* \f[
* V_{xc}({\bf r}) \rho({\bf r}) + {\bf B}_{xc}({\bf r}){\bf m}({\bf r}) =
* V_{xc}({\bf r}) \rho({\bf r}) + {\rm B}_{xc}({\bf r}) {\rm m}({\bf r}) =
* V^{\uparrow}({\bf r})\rho^{\uparrow}({\bf r}) + V^{\downarrow}({\bf r})\rho^{\downarrow}({\bf r})
* \f]
* where
* \f{eqnarray*}{
* \rho^{\uparrow}({\bf r}) &=& \frac{1}{2}\Big( \rho({\bf r}) + {\rm m}({\bf r}) \Big) \\
* \rho^{\downarrow}({\bf r}) &=& \frac{1}{2}\Big( \rho({\bf r}) - {\rm m}({\bf r}) \Big)
* \f}
* and
* \f{eqnarray*}{
* V^{\uparrow}({\bf r}) &=& V_{xc}({\bf r}) + {\rm B}_{xc}({\bf r}) \\
* V^{\downarrow}({\bf r}) &=& V_{xc}({\bf r}) - {\rm B}_{xc}({\bf r})
* \f}
*/
void xc(Periodic_function<double>* rho,
Periodic_function<double>* magnetization[3],
Periodic_function<double>* vxc,
Periodic_function<double>* bxc[3],
Periodic_function<double>* exc);
/// Generate effective potential and magnetic field from charge density and magnetization.
/** Order matters: Hartree potential first, then (for pseudopotentials) the local
 *  ionic potential, then XC evaluated on rho + rho_pseudo_core, and finally the
 *  backward FFT to obtain plane-wave coefficients. */
inline void generate(Density& density__)
{
    PROFILE("sirius::Potential::generate");
    /* zero effective potential and magnetic field */
    zero();
    /* solve Poisson equation */
    poisson(density__.rho(), hartree_potential_);
    /* add Hartree potential to the total potential */
    effective_potential_->add(hartree_potential_);
    if (ctx_.full_potential()) {
        xc(density__.rho(), density__.magnetization(), xc_potential_, effective_magnetic_field_, xc_energy_density_);
    } else {
        /* add local ionic potential to the effective potential */
        effective_potential_->add(local_potential_.get());
        /* create temporary function for rho + rho_core */
        Periodic_function<double> rhovc(ctx_, 0, 0);
        rhovc.zero();
        rhovc.add(density__.rho());
        rhovc.add(density__.rho_pseudo_core());
        /* construct XC potentials from rho + rho_core */
        xc(&rhovc, density__.magnetization(), xc_potential_, effective_magnetic_field_, xc_energy_density_);
    }
    /* add XC potential to the effective potential */
    effective_potential_->add(xc_potential_);
    if (ctx_.full_potential()) {
        /* sync_mt(): presumably synchronizes distributed MT parts across ranks — confirm */
        effective_potential_->sync_mt();
        for (int j = 0; j < ctx_.num_mag_dims(); j++) {
            effective_magnetic_field_[j]->sync_mt();
        }
    }
    /* get plane-wave coefficients of effective potential;
     * they will be used in three places:
     * 1) compute D-matrix
     * 2) establish a mapping between fine and coarse FFT grid for the Hloc operator
     * 3) symmetrize effective potential */
    effective_potential_->fft_transform(-1);
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        effective_magnetic_field_[j]->fft_transform(-1);
    }
    if (!ctx_.full_potential()) {
        generate_D_operator_matrix();
        generate_PAW_effective_potential(density__);
    }
}
/// Write effective potential and magnetic field to the HDF5 storage file.
/** Only rank 0 writes; all ranks synchronize at the trailing barrier. */
inline void save()
{
    if (comm_.rank() == 0) {
        HDF5_tree fout(storage_file_name, false);
        effective_potential_->hdf5_write(fout["effective_potential"]);
        for (int j = 0; j < ctx_.num_mag_dims(); j++) {
            effective_magnetic_field_[j]->hdf5_write(fout["effective_magnetic_field"].create_node(j));
        }
    }
    /* keep all ranks in step until the file is written */
    comm_.barrier();
}
/// Read effective potential and magnetic field back from the HDF5 storage file.
/* NOTE(review): unlike save(), every rank opens the file here — confirm
 * HDF5_tree supports concurrent read-only access. */
inline void load()
{
    HDF5_tree fout(storage_file_name, false);
    effective_potential_->hdf5_read(fout["effective_potential"]);
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        effective_magnetic_field_[j]->hdf5_read(fout["effective_magnetic_field"][j]);
    }
    /* refresh the per-atom potentials from the newly loaded data */
    if (ctx_.full_potential()) {
        update_atomic_potential();
    }
}
/// Push the current effective potential and field into the atom objects.
inline void update_atomic_potential()
{
    /* spherical (lm = 0) part: one representative atom per symmetry class */
    int num_classes = unit_cell_.num_atom_symmetry_classes();
    for (int icls = 0; icls < num_classes; icls++) {
        int ia = unit_cell_.atom_symmetry_class(icls).atom_id(0);
        int np = unit_cell_.atom(ia).num_mt_points();
        std::vector<double> v_sph(np);
        for (int ir = 0; ir < np; ir++) {
            v_sph[ir] = y00 * effective_potential_->f_mt<index_domain_t::global>(0, ir, ia);
        }
        unit_cell_.atom_symmetry_class(icls).set_spherical_potential(v_sph);
    }
    /* full non-spherical potential and magnetic field: per atom */
    for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
        double* vptr = &effective_potential_->f_mt<index_domain_t::global>(0, 0, ia);
        double* bptr[] = {nullptr, nullptr, nullptr};
        for (int j = 0; j < ctx_.num_mag_dims(); j++) {
            bptr[j] = &effective_magnetic_field_[j]->f_mt<index_domain_t::global>(0, 0, ia);
        }
        unit_cell_.atom(ia).set_nonspherical_potential(vptr, bptr);
    }
}
template <device_t pu>
void add_mt_contribution_to_pw();
/// Generate plane-wave coefficients of the potential in the interstitial region.
void generate_pw_coefs();
/// Calculate D operator from potential and augmentation charge.
/** The following real symmetric matrix is computed:
* \f[
* D_{\xi \xi'}^{\alpha} = \int V({\bf r}) Q_{\xi \xi'}^{\alpha}({\bf r}) d{\bf r}
* \f]
* In the plane-wave domain this integrals transform into sum over Fourier components:
* \f[
* D_{\xi \xi'}^{\alpha} = \sum_{\bf G} \langle V |{\bf G}\rangle \langle{\bf G}|Q_{\xi \xi'}^{\alpha} \rangle =
* \sum_{\bf G} V^{*}({\bf G}) e^{-i{\bf r}_{\alpha}{\bf G}} Q_{\xi \xi'}^{A}({\bf G}) =
* \sum_{\bf G} Q_{\xi \xi'}^{A}({\bf G}) \tilde V_{\alpha}^{*}({\bf G})
* \f]
* where \f$ \alpha \f$ is the atom, \f$ A \f$ is the atom type and
* \f[
* \tilde V_{\alpha}({\bf G}) = e^{i{\bf r}_{\alpha}{\bf G}} V({\bf G})
* \f]
* Both \f$ V({\bf r}) \f$ and \f$ Q({\bf r}) \f$ functions are real and the following condition is fulfilled:
* \f[
* \tilde V_{\alpha}({\bf G}) = \tilde V_{\alpha}^{*}(-{\bf G})
* \f]
* \f[
* Q_{\xi \xi'}({\bf G}) = Q_{\xi \xi'}^{*}(-{\bf G})
* \f]
* In the sum over plane-wave coefficients the \f$ {\bf G} \f$ and \f$ -{\bf G} \f$ contributions will give:
* \f[
* Q_{\xi \xi'}^{A}({\bf G}) \tilde V_{\alpha}^{*}({\bf G}) + Q_{\xi \xi'}^{A}(-{\bf G}) \tilde V_{\alpha}^{*}(-{\bf G}) =
* 2 \Re \Big( Q_{\xi \xi'}^{A}({\bf G}) \Big) \Re \Big( \tilde V_{\alpha}^{*}({\bf G}) \Big) +
* 2 \Im \Big( Q_{\xi \xi'}^{A}({\bf G}) \Big) \Im \Big( \tilde V_{\alpha}^{*}({\bf G}) \Big)
* \f]
* This allows the use of a <b>dgemm</b> instead of a <b>zgemm</b> when \f$ D_{\xi \xi'}^{\alpha} \f$ matrix
* is calculated for all atoms of the same type.
*/
void generate_D_operator_matrix();
void generate_PAW_effective_potential(Density& density);
std::vector<double> const& PAW_hartree_energies() const
{
return paw_hartree_energies_;
}
std::vector<double> const& PAW_xc_energies() const
{
return paw_xc_energies_;
}
std::vector<double> const& PAW_core_energies() const
{
return paw_core_energies_;
}
std::vector<double> const& PAW_one_elec_energies()
{
return paw_one_elec_energies_;
}
double PAW_hartree_total_energy() const
{
return paw_hartree_total_energy_;
}
double PAW_xc_total_energy() const
{
return paw_xc_total_energy_;
}
double PAW_total_core_energy() const
{
return paw_total_core_energy_;
}
double PAW_total_energy()
{
return paw_hartree_total_energy_ + paw_xc_total_energy_ ;
}
double PAW_one_elec_energy()
{
return paw_one_elec_energy_;
}
void check_potential_continuity_at_mt();
/// Total size (number of elements) of the potential and effective magnetic field.
inline size_t size()
{
    size_t total = effective_potential_->size();
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        total += effective_magnetic_field_[j]->size();
    }
    return total;
}
/// Serialize the potential and every field component into the mixer.
inline void pack(Mixer<double>& mixer)
{
    size_t offset = effective_potential_->pack(0, mixer);
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        offset += effective_magnetic_field_[j]->pack(offset, mixer);
    }
}
/// Restore the potential and every field component from a flat buffer.
inline void unpack(double const* buffer)
{
    size_t offset = effective_potential_->unpack(buffer);
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        offset += effective_magnetic_field_[j]->unpack(&buffer[offset]);
    }
}
/// Total effective potential.
Periodic_function<double>* effective_potential()
{
    return effective_potential_.get();
}
/// Local pseudopotential part (created only when !full_potential()).
Periodic_function<double>& local_potential()
{
    return *local_potential_;
}
/// Muffin-tin part of the effective potential for local atom index ialoc.
Spheric_function<spectral, double> const& effective_potential_mt(int ialoc) const
{
    return effective_potential_->f_mt(ialoc);
}
/// Array of magnetic-field components (num_mag_dims() entries are valid).
Periodic_function<double>** effective_magnetic_field()
{
    return effective_magnetic_field_;
}
/// i-th magnetic-field component.
Periodic_function<double>* effective_magnetic_field(int i)
{
    return effective_magnetic_field_[i];
}
/// Hartree potential.
Periodic_function<double>* hartree_potential()
{
    return hartree_potential_;
}
/// Muffin-tin part of the Hartree potential for local atom index ialoc.
Spheric_function<spectral, double> const& hartree_potential_mt(int ialoc) const
{
    return hartree_potential_->f_mt(ialoc);
}
/// Exchange-correlation potential.
Periodic_function<double>* xc_potential()
{
    return xc_potential_;
}
/// Exchange-correlation energy density.
Periodic_function<double>* xc_energy_density()
{
    return xc_energy_density_;
}
/// Allocate muffin-tin storage for the potential and all field components.
void allocate()
{
    effective_potential_->allocate_mt(true);
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        effective_magnetic_field_[j]->allocate_mt(true);
    }
}
/// Hartree potential value stored for atom ia (presumably at the nucleus — confirm).
inline double vh_el(int ia)
{
    return vh_el_(ia);
}
/// Hartree energy contribution.
inline double energy_vha()
{
    return energy_vha_;
}
/// Create the mixer and seed it with the current potential/field.
void mixer_init()
{
    mixer_ = Mixer_factory<double>(ctx_.mixer_input_section().type_, size(), ctx_.mixer_input_section(), comm_);
    pack(*mixer_);
    mixer_->initialize();
}
/// Mix the current potential with the stored history.
/** \return RMS change reported by the mixer; the mixed result is unpacked
 *  back into the potential and field. */
double mix()
{
    pack(*mixer_);
    double rms = mixer_->mix();
    unpack(mixer_->output_buffer());
    return rms;
}
/// Plane-wave coefficient of the effective potential.
double_complex const& veff_pw(int ig__) const
{
    return veff_pw_(ig__);
}
/// Overwrite all plane-wave coefficients of the effective potential.
inline void set_veff_pw(double_complex* veff_pw__)
{
    std::copy(veff_pw__, veff_pw__ + ctx_.gvec().num_gvec(), veff_pw_.at<CPU>());
}
/* rm_inv / rm2_inv: presumably inverse relativistic mass and its square,
 * allocated for ZORA / IORA respectively (see constructor) — confirm. */
double_complex const& rm_inv_pw(int ig__) const
{
    return rm_inv_pw_(ig__);
}
inline void set_rm_inv_pw(double_complex* rm_inv_pw__)
{
    std::copy(rm_inv_pw__, rm_inv_pw__ + ctx_.gvec().num_gvec(), rm_inv_pw_.at<CPU>());
}
double_complex const& rm2_inv_pw(int ig__) const
{
    return rm2_inv_pw_(ig__);
}
inline void set_rm2_inv_pw(double_complex* rm2_inv_pw__)
{
    std::copy(rm2_inv_pw__, rm2_inv_pw__ + ctx_.gvec().num_gvec(), rm2_inv_pw_.at<CPU>());
}
/// FFT-transform potential and field components in the given direction.
inline void fft_transform(int direction__)
{
    effective_potential_->fft_transform(direction__);
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        effective_magnetic_field_[j]->fft_transform(direction__);
    }
}
};
#include "Potential/init.hpp"
#include "Potential/generate_d_operator_matrix.hpp"
#include "Potential/generate_pw_coefs.hpp"
#include "Potential/xc.hpp"
#include "Potential/poisson.hpp"
#include "Potential/paw_potential.hpp"
};
#endif // __POTENTIAL_H__
|
GB_binop__lt_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__lt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint16)
// A*D function (colscale): GB (_AxD__lt_uint16)
// D*A function (rowscale): GB (_DxB__lt_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint16)
// C=scalar+B GB (_bind1st__lt_uint16)
// C=scalar+B' GB (_bind1st_tran__lt_uint16)
// C=A+scalar GB (_bind2nd__lt_uint16)
// C=A'+scalar GB (_bind2nd_tran__lt_uint16)
// C type: bool
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_UINT16 || GxB_NO_LT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LT is none of these, so this dense C+=A+B kernel is compiled out and
// no symbol is emitted (hence the "(none)" placeholder name).
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; "+" is GraphBLAS eWise
// notation for the LT_UINT16 operator: cij = (aij < bij).
GrB_Info GB (_Cdense_ewise3_noaccum__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time (see GB_DISABLE); caller falls back
// to the generic kernel
return (GrB_NO_VALUE) ;
#else
// the template does all the work
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C with the LT_UINT16 operator.
GrB_Info GB (_Cdense_accumB__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The subassign template is compiled out (#if 0) for this operator
// (presumably because C is bool while B is uint16_t -- see the type
// macros above), so this function is a no-op that reports success.
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into dense C with the LT_UINT16 operator.
GrB_Info GB (_Cdense_accumb__lt_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The scalar-accum template is compiled out (#if 0) for this operator
// (presumably because C is bool while b is uint16_t), so this function
// is a no-op that reports success.
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// LT_UINT16 operator entrywise via the colscale template.
GrB_Info GB (_AxD__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is C's value array; C has the LT result type (bool)
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// LT_UINT16 operator entrywise via the rowscale template.
GrB_Info GB (_DxB__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is C's value array; C has the LT result type (bool)
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: cij = (aij < bij) over the set union of the patterns of A
// and B, optionally under mask M.
GrB_Info GB (_AaddB__lt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: cij = (aij < bij) over the set intersection of the patterns
// of A and B, optionally under mask M.
GrB_Info GB (_AemultB_01__lt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the meta-template does all the work
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// GB_BINOP_FLIP is 0 for LT_UINT16 (see the macro above), so this
// branch is the one that is compiled.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult when M is sparse/hyper and both A and B are bitmap/full.
GrB_Info GB (_AemultB_03__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template does all the work
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C, with optional (complemented) mask M.
GrB_Info GB (_AemultB_bitmap__lt_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template does all the work
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for every entry p present in B, with the scalar
// bound to the first operand of the LT operator.
GrB_Info GB (_bind1st__lt_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entry p if it is not present (Bb is B's bitmap, if any)
if (!GBB (Bb, p)) continue ;
uint16_t bij = Bx [p] ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for every entry p present in A, with the scalar
// bound to the second operand of the LT operator.
GrB_Info GB (_bind2nd__lt_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entry p if it is not present (Ab is A's bitmap, if any)
if (!GBB (Ab, p)) continue ;
uint16_t aij = Ax [p] ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x < aij), using the
// GB_CAST_OP macro defined immediately above.
GrB_Info GB (_bind1st_tran__lt_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (a no-op here since A and
// B share the same type, uint16_t; kept as generated)
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij < y), using the
// GB_CAST_OP macro defined immediately above.
GrB_Info GB (_bind2nd_tran__lt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_uint64_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint64_bool
// op(A') function: GB_unop_tran__identity_uint64_bool
// C type: uint64_t
// A type: bool
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint64_t) Ax [p]: apply the identity operator with a
// bool -> uint64_t typecast, one entry per iteration, in parallel.
GrB_Info GB_unop_apply__identity_uint64_bool
(
uint64_t *Cx, // Cx and Ax may be aliased
const bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time; use the generic case instead
return (GrB_NO_VALUE) ;
#else
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// cast and copy in one step (identity op)
Cx [k] = (uint64_t) Ax [k] ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint64_t) A': transpose A and typecast bool -> uint64_t via the
// transpose template (numeric phase only).
GrB_Info GB_unop_tran__identity_uint64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// run only the numeric phase of the transpose template
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__islt_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_fp64)
// A*D function (colscale): GB (_AxD__islt_fp64)
// D*A function (rowscale): GB (_DxB__islt_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_fp64)
// C=scalar+B GB (_bind1st__islt_fp64)
// C=scalar+B' GB (_bind1st_tran__islt_fp64)
// C=A+scalar GB (_bind2nd__islt_fp64)
// C=A'+scalar GB (_bind2nd_tran__islt_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_FP64 || GxB_NO_ISLT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLT is none of these, so this dense C+=A+B kernel is compiled out and
// no symbol is emitted (hence the "(none)" placeholder name).
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; "+" is GraphBLAS eWise
// notation for the ISLT_FP64 operator: cij = (aij < bij), as double.
GrB_Info GB (_Cdense_ewise3_noaccum__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time (see GB_DISABLE); caller falls back
// to the generic kernel
return (GrB_NO_VALUE) ;
#else
// the template does all the work
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C, with ISLT_FP64 as the
// accumulator (C, A, and B all have type double, so this variant is
// generated, unlike the lt_* kernels whose C type is bool).
GrB_Info GB (_Cdense_accumB__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// the subassign template does all the work
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a double scalar into dense C, with ISLT_FP64 as
// the accumulator.
GrB_Info GB (_Cdense_accumb__islt_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// not reached (the braced block above always returns); kept as generated
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// ISLT_FP64 operator entrywise via the colscale template.
GrB_Info GB (_AxD__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is C's value array; C has the ISLT result type (double)
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// ISLT_FP64 operator entrywise via the rowscale template.
GrB_Info GB (_DxB__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is C's value array; C has the ISLT result type (double)
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: cij = (aij < bij) over the set union of the patterns of A
// and B, optionally under mask M.
GrB_Info GB (_AaddB__islt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: cij = (aij < bij) over the set intersection of the patterns
// of A and B, optionally under mask M.
GrB_Info GB (_AemultB_01__islt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the meta-template does all the work
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// GB_BINOP_FLIP is 0 for ISLT_FP64 (see the macro above), so this
// branch is the one that is compiled.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult when M is sparse/hyper and both A and B are bitmap/full.
GrB_Info GB (_AemultB_03__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template does all the work
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C, with optional (complemented) mask M.
GrB_Info GB (_AemultB_bitmap__islt_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template does all the work
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for every entry p present in B, with the scalar
// bound to the first operand of the ISLT operator.
GrB_Info GB (_bind1st__islt_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entry p if it is not present (Bb is B's bitmap, if any)
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for every entry p present in A, with the scalar
// bound to the second operand of the ISLT operator.
GrB_Info GB (_bind2nd__islt_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entry p if it is not present (Ab is A's bitmap, if any)
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x < aij), using the
// GB_CAST_OP macro defined immediately above.
GrB_Info GB (_bind1st_tran__islt_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (a no-op here since A and
// B share the same type, double; kept as generated)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij < y), using the
// GB_CAST_OP macro defined immediately above.
GrB_Info GB (_bind2nd_tran__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
privateFunctions.c | #include <stdio.h>
#include <stdlib.h>
#define COMPEARTH_PRIVATE_DET3X3 1
#define COMPEARTH_PRIVATE_GEMV3 1
#define COMPEARTH_PRIVATE_GEM3 1
#define COMPEARTH_PRIVATE_GEMT3 1
#define COMPEARTH_PRIVATE_CROSS3 1
#define COMPEARTH_PRIVATE_NORM3 1
#define COMPEARTH_PRIVATE_DOT3 1
#define COMPEARTH_PRIVATE_WRAP360 1
#define COMPEARTH_PRIVATE_MOD 1
#define COMPEARTH_PRIVATE_ANTIPODE 1
#define COMPEARTH_PRIVATE_UPDOWN_ABS_ARGSORT3 1
#define COMPEARTH_PRIVATE_UPDOWN_ARGSORT3 1
#define COMPEARTH_PRIVATE_ARGSORT3 1
#include "compearth.h"
/*!
 * @brief Computes the determinant of a 3 x 3 matrix stored in column
 *        major order.
 */
#ifdef _OPENMP
#pragma omp declare simd
#endif
inline double det3x3ColumnMajor(const double *__restrict__ A)
{
    // Cofactor expansion along the first row.  Column major layout:
    // A[0..2] is column 1, A[3..5] column 2, A[6..8] column 3.
    const double minor11 = A[4]*A[8] - A[5]*A[7];
    const double minor12 = A[1]*A[8] - A[2]*A[7];
    const double minor13 = A[1]*A[5] - A[2]*A[4];
    return A[0]*minor11 - A[3]*minor12 + A[6]*minor13;
}
/*!
 * @brief Computes C = A*B where A, B, and C are 3 x 3 matrices in column
 *        major order.
 */
#ifdef _OPENMP
#pragma omp declare simd
#endif
inline void gemm3_colMajorNoTransNoTrans(const double *__restrict__ A,
                                         const double *__restrict__ B,
                                         double *__restrict__ C)
{
    // C(r,c) = sum_k A(r,k)*B(k,c); element (r,c) lives at index r + 3*c.
    for (int c = 0; c < 3; c++)
    {
        for (int r = 0; r < 3; r++)
        {
            C[r + 3*c] = A[r]*B[3*c] + A[r + 3]*B[3*c + 1] + A[r + 6]*B[3*c + 2];
        }
    }
    return;
}
/*!
 * @brief Computes C = A*B' where A, B, and C are 3 x 3 matrices in column
 *        major order.
 */
#ifdef _OPENMP
#pragma omp declare simd
#endif
inline void gemm3_colMajorNoTransTrans(const double *__restrict__ A,
                                       const double *__restrict__ B,
                                       double *__restrict__ C)
{
    // C(r,c) = sum_k A(r,k)*B(c,k); element (r,c) lives at index r + 3*c.
    for (int c = 0; c < 3; c++)
    {
        for (int r = 0; r < 3; r++)
        {
            C[r + 3*c] = A[r]*B[c] + A[r + 3]*B[c + 3] + A[r + 6]*B[c + 6];
        }
    }
    return;
}
/*!
 * @brief Computes y = A*x where A is a 3 x 3 matrix in column major order
 *        and x and y are length 3 vectors.
 */
#ifdef _OPENMP
#pragma omp declare simd
#endif
inline void gemv3_colMajorNoTrans(const double *__restrict__ A,
                                  const double *__restrict__ x,
                                  double *__restrict__ y)
{
    // y(r) = A(r,1)*x(1) + A(r,2)*x(2) + A(r,3)*x(3), column major A.
    for (int r = 0; r < 3; r++)
    {
        y[r] = A[r]*x[0] + A[r + 3]*x[1] + A[r + 6]*x[2];
    }
    return;
}
/*!
 * @brief Computes the cross-product, c = a x b, where a, b, and c are
 *        length 3 vectors.
 */
#ifdef _OPENMP
#pragma omp declare simd
#endif
inline void cross3(const double *__restrict__ a,
                   const double *__restrict__ b,
                   double *__restrict__ c)
{
    // c[i] = a[j]*b[k] - a[k]*b[j] with (i,j,k) a cyclic permutation.
    for (int i = 0; i < 3; i++)
    {
        const int j = (i + 1) % 3;
        const int k = (i + 2) % 3;
        c[i] = a[j]*b[k] - a[k]*b[j];
    }
    return;
}
/*!
* @brief Computes the norm of a vector a which is length 3.
*/
#ifdef _OPENMP
#pragma omp declare simd
#endif
inline double norm3(const double *__restrict__ a)
{
return sqrt(a[0]*a[0] + a[1]*a[1] + a[2]*a[2]);
}
/*!
 * @brief Computes the dot-product a.b where a and b are length 3 vectors.
 */
#ifdef _OPENMP
#pragma omp declare simd
#endif
inline double dot3(const double *__restrict__ a, const double *__restrict__ b)
{
    // Accumulate term by term in index order (same order as a fully
    // written-out sum a0*b0 + a1*b1 + a2*b2).
    double sum = 0.0;
    for (int i = 0; i < 3; i++)
    {
        sum += a[i]*b[i];
    }
    return sum;
}
/*!
* @brief Wraps angle in degrees to [0,360]
*
* @param[in] lon angle to wrap (degrees)
*
* @result wrapped angle in range [0,360]
*
* @author Carl Tape translated to C by Ben Baker
*
* @copyright MIT
*
*/
#ifdef _OPENMP
#pragma omp declare simd
#endif
inline double wrap360(const double lon)
{
double lonw;
bool lpos;
lpos = false;
if (lon > 0.0){lpos = true;}
lonw = lon - floor(lon/360.0)*360.0; //lonw = fmod(lon, 360.0);
if (lon == 360.){lonw = 360.0;} // matlab convention
if (lonw == 0.0 && lpos){lonw = 360.0;}
return lonw;
}
/*!
* @brief Emulation of Matlab's modulus after division.
*
* @author Ben Baker
*
* @copyright MIT
*
*/
#ifdef _OPENMP
#pragma omp declare simd
#endif
inline double mod(const double x, const double y)
{
double xmod;
bool xisnty;
xmod = 0.0; // Convention 1 - if y == 0.0
// This will avoid division by 0
if (fabs(y) > 0.0)
{
xmod = x - floor(x/y)*y;
// Convention 2 - mod(x, x) is 0
xisnty = false;
if (x != y){xisnty = true;}
if (!xisnty){xmod = 0.0;} //if (x == y){xmod = 0.0;}
// Convention 3 - if x ~= y and y~=0 then mod has the same
// sign as y. Note y~= 0 is true b/c fabs(y) > 0.
if (xisnty && y < 0.0 && xmod > 0.0){xmod =-xmod;}
if (xisnty && y > 0.0 && xmod < 0.0){xmod =+xmod;}
}
/*
// Original code
if (y == 0.0){return x;}
if (x == y){return 0.0;}
xmod = x - floor(x/y)*y;
// 3rd convention - for x ~= y and y ~= 0 mod has same sign as y
if (x != y && y != 0.0)
{
if (y < 0.0){xmod =-xmod;}
if (y > 0.0){xmod =+xmod;}
}
*/
return xmod;
}
#ifdef _OPENMP
#pragma omp declare simd
#endif
/*!
 * @brief Computes the antipode (diametrically opposite point) of the
 *        geographic point (lat, lon).
 *
 * @param[in] lat     latitude of input point
 * @param[in] lon     longitude of input point
 * @param[out] latOut latitude of the antipode (= -lat)
 * @param[out] lonOut longitude of the antipode
 * @param[in] isDeg   if true lat/lon are in degrees; otherwise radians
 */
inline void antipode(const double lat, const double lon,
                     double *latOut, double *lonOut, const bool isDeg)
{
    const double halfTurn = isDeg ? 180.0 : M_PI;
    const double fullTurn = isDeg ? 360.0 : 2.0*M_PI;
    *latOut = -lat;
    *lonOut = halfTurn - mod(-lon, fullTurn);
    return;
}
/*!
 * @brief Argsort a length 3 vector in increasing or decreasing order.
 *
 * @param[in] x        Length 3 array to argsort.
 * @param[in] lascend  If true then sort in ascending order. \n
 *                     Otherwise, sort in descending order.
 *
 * @param[out] perm    Permutation such that x(perm) is in ascending
 *                     or descending order.
 *
 * @result 0 indicates success.
 *
 * @author Ben Baker, ISTI
 *
 * @copyright MIT
 *
 */
inline int argsort3_upDown(const double *__restrict__ x,
                           const bool lascend,
                           int *__restrict__ perm)
{
    int ascending[3];
    int ierr;
    if (lascend)
    {
        /* Ascending order is argsort3's native output. */
        ierr = argsort3(x, perm);
    }
    else
    {
        /* Descending order is the ascending permutation reversed. */
        int k;
        ierr = argsort3(x, ascending);
        for (k=0; k<3; k++){perm[k] = ascending[2-k];}
    }
    return ierr;
}
//============================================================================//
/*!
 * @brief Argsort a length 3 vector in increasing or decreasing order
 *        of absolute value.
 *
 * @param[in] x        Length 3 array to argsort.
 * @param[in] lascend  If true then sort in ascending order. \n
 *                     Otherwise, sort in descending order.
 *
 * @param[out] perm    Permutation such that |x(perm)| is in ascending
 *                     or descending order.
 *
 * @result 0 indicates success.
 *
 * @author Ben Baker, ISTI
 *
 * @copyright MIT
 *
 */
inline int argsort3_absUpDown(const double *__restrict__ x,
                              const bool lascend,
                              int *__restrict__ perm)
{
    /* Sort the magnitudes; ordering direction is delegated. */
    const double xabs[3] = {fabs(x[0]), fabs(x[1]), fabs(x[2])};
    return argsort3_upDown(xabs, lascend, perm);
}
//============================================================================//
/*!
 * @brief Argsorts a length 3 array in ascending order.
 *
 * @param[in] x      Length 3 array to argsort.
 *
 * @param[out] perm  Permutation such that x(perm) is in ascending order.
 *
 * @result 0 indicates success.
 *
 * @author Ben Baker, ISTI
 *
 * @copyright MIT
 *
 */
inline int argsort3(const double *__restrict__ x, int *__restrict__ perm)
{
    int k, swap;
    /* Start from the identity permutation. */
    perm[0] = 0;
    perm[1] = 1;
    perm[2] = 2;
    /* A 3-element sorting network: compare/exchange (0,2), (0,1), (1,2). */
    if (x[perm[0]] > x[perm[2]])
    {
        swap = perm[2];
        perm[2] = perm[0];
        perm[0] = swap;
    }
    if (x[perm[0]] > x[perm[1]])
    {
        swap = perm[1];
        perm[1] = perm[0];
        perm[0] = swap;
    }
    /* Smallest element is now first; order the remaining pair. */
    if (x[perm[1]] > x[perm[2]])
    {
        swap = perm[2];
        perm[2] = perm[1];
        perm[1] = swap;
    }
    /* Paranoid post-condition check; should never trigger. */
    for (k=1; k<3; k++)
    {
        if (x[perm[k-1]] > x[perm[k]])
        {
#ifdef COMPEARTH_DEBUG_SRC
            fprintf(stderr, "%s: Failed to sort numbers in ascending order\n",
                    __func__);
#endif
            return -1;
        }
    }
    return 0;
}
|
_interpolate3d.c | /* Generated by Cython 0.22 */
#define PY_SSIZE_T_CLEAN
#ifndef CYTHON_USE_PYLONG_INTERNALS
#ifdef PYLONG_BITS_IN_DIGIT
#define CYTHON_USE_PYLONG_INTERNALS 0
#else
#include "pyconfig.h"
#ifdef PYLONG_BITS_IN_DIGIT
#define CYTHON_USE_PYLONG_INTERNALS 1
#else
#define CYTHON_USE_PYLONG_INTERNALS 0
#endif
#endif
#endif
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
#error Cython requires Python 2.6+ or Python 3.2+.
#else
#define CYTHON_ABI "0_22"
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#if PY_MAJOR_VERSION >= 3
#define Py_TPFLAGS_CHECKTYPES 0
#define Py_TPFLAGS_HAVE_INDEX 0
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#else
#define CYTHON_PEP393_ENABLED 0
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#define __Pyx_PyFrozenSet_Size(s) PyObject_Size(s)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#define __Pyx_PyFrozenSet_Size(s) PySet_Size(s)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
  /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
   a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
   a quiet NaN. */
  /* Fallback used only when <math.h> does not define NAN: fill the float
     with an all-ones bit pattern, which is a quiet NaN in IEEE-754 binary32.
     Written via memset to avoid type-punning through pointer casts. */
  float value;
  memset(&value, 0xFF, sizeof(value));
  return value;
}
#endif
#define __Pyx_void_to_None(void_result) (void_result, Py_INCREF(Py_None), Py_None)
#ifdef __cplusplus
/* C++-only helpers for Cython-generated code (guarded by __cplusplus). */
template<typename T>
void __Pyx_call_destructor(T* x) {
    /* Explicitly invoke T's destructor without deallocating storage. */
    x->~T();
}
/* Wraps a T& so generated code can treat a reference like a pointer-backed
   value; does not own the referent. */
template<typename T>
class __Pyx_FakeReference {
  public:
    __Pyx_FakeReference() : ptr(NULL) { }
    __Pyx_FakeReference(T& ref) : ptr(&ref) { }
    T *operator->() { return ptr; }
    operator T&() { return *ptr; }
  private:
    T *ptr;
};
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#define __PYX_HAVE___interpolate3d
#define __PYX_HAVE_API___interpolate3d
#include "string.h"
#include "stdio.h"
#include "stdlib.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "math.h"
#include "pythread.h"
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \
(sizeof(type) < sizeof(Py_ssize_t)) || \
(sizeof(type) > sizeof(Py_ssize_t) && \
likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX) && \
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \
v == (type)PY_SSIZE_T_MIN))) || \
(sizeof(type) == sizeof(Py_ssize_t) && \
(is_signed || likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
    /* Count Py_UNICODE code units up to, but excluding, the terminator
       (Python 2 fallback for Py_UNICODE_strlen). */
    size_t n = 0;
    while (u[n] != 0) ++n;
    return n;
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_COMPILING_IN_CPYTHON
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
/* Detect whether sys.getdefaultencoding() is ASCII or an ASCII superset.
   Sets __Pyx_sys_getdefaultencoding_not_ascii as a side effect.
   Returns 0 on success; -1 with a Python exception set on failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    /* Borrowed char* valid while default_encoding is alive. */
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        /* Verify the default encoding round-trips all 128 ASCII code points
           (i.e. is a superset of ASCII); otherwise raise ValueError. */
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    /* XDECREF tolerates NULLs for objects never created on this path. */
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
/* Cache sys.getdefaultencoding() into __PYX_DEFAULT_STRING_ENCODING.
   Returns 0 on success; -1 with a Python exception set (or on OOM). */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    /* Borrowed char* valid while default_encoding is alive. */
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    /* BUG FIX: allocate room for the terminating NUL byte as well --
       the original malloc(strlen(...)) was one byte short, so the strcpy
       below overflowed the buffer by one byte. */
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"_interpolate3d.pyx",
"__init__.pxd",
"stringsource",
"type.pxd",
};
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
/* Select an atomic increment/decrement implementation for the memoryview
   refcount. BUG FIXES vs. generated code:
   - '__GNUC_PATCHLEVEL' was missing its trailing underscores, so the
     undefined macro evaluated to 0 and atomics were disabled on GCC 4.1.x.
   - 'MSC_VER' was a typo for '_MSC_VER' (undefined macro -> 0), so the
     MSVC Interlocked* branch could never be selected despite the
     "Using MSVC atomics" diagnostic. */
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 || \
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) && \
                    !defined(__i386__)
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER)
    #include <Windows.h>
    /* Re-define the atomic integer type as LONG, which Interlocked* expects. */
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using MSVC atomics"
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    /* Intentionally disabled (&& 0) in the generated code; kept as-is. */
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    /* No supported atomic primitives: fall back to lock-based counting. */
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview) \
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview) \
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview) \
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview) \
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":726
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":727
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":728
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":729
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":733
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":734
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":735
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":736
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":740
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":741
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":750
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":751
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":752
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":754
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":755
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":756
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":758
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":759
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":761
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":762
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":763
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":765
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":766
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":767
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":769
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* "View.MemoryView":99
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":269
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":302
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":921
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
/* _memoryviewslice: internal memoryview subclass carrying a concrete C
 * slice (__Pyx_memviewslice) plus element <-> PyObject converters. */
struct __pyx_memoryviewslice_obj {
  struct __pyx_memoryview_obj __pyx_base;   /* inherited memoryview state */
  __Pyx_memviewslice from_slice;            /* the wrapped C-level slice */
  PyObject *from_object;                    /* object the slice was taken from */
  PyObject *(*to_object_func)(char *);      /* convert one element to a Python object */
  int (*to_dtype_func)(char *, PyObject *); /* write a Python object into one element */
};
/* "View.MemoryView":302
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
/* vtable of C-level (cdef) methods for the memoryview class; the
 * corresponding implementations are the __pyx_memoryview_* statics below. */
struct __pyx_vtabstruct_memoryview {
  char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
  PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":921
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
/* vtable for _memoryviewslice: inherits memoryview's slots unchanged
 * (the base struct is embedded so the layouts stay compatible). */
struct __pyx_vtabstruct__memoryviewslice {
  struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
if (acquire_gil) { \
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
PyGILState_Release(__pyx_gilstate_save); \
} else { \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext() \
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_XDECREF(tmp); \
} while (0)
#define __Pyx_DECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_DECREF(tmp); \
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
#if CYTHON_COMPILING_IN_CPYTHON
/* Fast attribute lookup: call the type's getattr slot directly instead of
 * going through PyObject_GetAttr's generic dispatch.  Returns a new
 * reference or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro))
        return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
    /* Python 2 only: legacy char*-based attribute slot. */
    if (likely(tp->tp_getattr))
        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
    /* Neither slot set (unusual): fall back to the generic path. */
    return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
const char* function_name);
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb);
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb);
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
/* Membership test backing `in` / `not in` on dicts.  `eq` selects the
 * sense of the test (Py_EQ for `in`, Py_NE for `not in`).  Returns 1/0,
 * or a negative value if PyDict_Contains raised. */
static CYTHON_INLINE int __Pyx_PyDict_Contains(PyObject* item, PyObject* dict, int eq) {
    int found = PyDict_Contains(dict, item);
    if (unlikely(found < 0))
        return found;  /* propagate the error code unchanged */
    return (found == (eq == Py_EQ));
}
#if PY_MAJOR_VERSION >= 3
/* dict[key] with a proper KeyError on a missing key (Python 3 path).
 * PyDict_GetItemWithError distinguishes "key absent" (NULL, no exception
 * pending) from "lookup itself raised" (NULL, exception set); only the
 * former gets a synthesized KeyError here.  Returns a NEW reference
 * (unlike the borrowed-reference PyDict_GetItem) or NULL on error. */
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
    PyObject *value;
    value = PyDict_GetItemWithError(d, key);
    if (unlikely(!value)) {
        if (!PyErr_Occurred()) {
            /* Wrap key in a 1-tuple so KeyError(key) formats correctly
               even when the key is itself a tuple. */
            PyObject* args = PyTuple_Pack(1, key);
            if (likely(args))
                PyErr_SetObject(PyExc_KeyError, args);
            Py_XDECREF(args);
        }
        return NULL;
    }
    Py_INCREF(value);
    return value;
}
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb);
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) : \
(is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) : \
__Pyx_SetItemInt_Generic(o, to_py_func(i), v)))
static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
int is_list, int wraparound, int boundscheck);
static CYTHON_INLINE int __Pyx_IterFinish(void);
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name);
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index);
static CYTHON_INLINE int __Pyx_unpack_tuple2(PyObject* tuple, PyObject** value1, PyObject** value2,
int is_tuple, int has_known_size, int decref_tuple);
static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name,
Py_ssize_t* p_orig_length, int* p_is_dict);
static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos,
PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict);
#if CYTHON_COMPILING_IN_CPYTHON
/* Fast list.append: when spare capacity exists, write the element
 * directly into the list's item array and bump ob_size, skipping the
 * generic PyList_Append call.  The `len > (allocated >> 1)` guard keeps
 * the fast path off lists whose size/capacity ratio is out of the range
 * this shortcut assumes.  Returns 0 on success, -1 on error.
 *
 * Fix: on CPython >= 3.9 Py_SIZE() is no longer an assignable lvalue
 * (bpo-39573); use Py_SET_SIZE() there so this compiles on modern
 * Pythons while preserving the original code on older ones. */
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    /* bitwise & is intentional: both operands are 0/1, keeps it branchless */
    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
        Py_INCREF(x);
        PyList_SET_ITEM(list, len, x);
#if PY_VERSION_HEX >= 0x03090000
        Py_SET_SIZE(list, len + 1);
#else
        Py_SIZE(list) = len + 1;
#endif
        return 0;
    }
    return PyList_Append(list, x);  /* slow path: may resize the list */
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact);
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
static CYTHON_INLINE long __Pyx_div_long(long, long); /* proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
#define __Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2)
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
#include <string.h>
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* proto */
#define UNARY_NEG_WOULD_OVERFLOW(x) (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
/* Append for list comprehensions: like __Pyx_PyList_Append but without
 * the lower-bound capacity guard, since comprehension target lists are
 * built exclusively by this helper.  Returns 0 on success, -1 on error.
 *
 * Fix: on CPython >= 3.9 Py_SIZE() is no longer an assignable lvalue
 * (bpo-39573); use Py_SET_SIZE() there so this compiles on modern
 * Pythons while preserving the original code on older ones. */
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        Py_INCREF(x);
        PyList_SET_ITEM(list, len, x);
#if PY_VERSION_HEX >= 0x03090000
        Py_SET_SIZE(list, len + 1);
#else
        Py_SIZE(list) = len + 1;
#endif
        return 0;
    }
    return PyList_Append(list, x);  /* slow path: may resize the list */
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/
/* list.extend(v) without a Python-level method call: uses the
 * CPython-internal _PyList_Extend when compiling against CPython,
 * otherwise the portable "assign at the end slice" idiom.
 * Returns 0 on success, -1 on error. */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject* none = _PyList_Extend((PyListObject*)L, v);
    if (unlikely(!none))
        return -1;
    Py_DECREF(none);  /* _PyList_Extend returns None on success */
    return 0;
#else
    /* slice [MAX:MAX] = v appends v's items at the very end */
    return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback);
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
#define __Pyx_CyFunction_USED 1
#include <structmember.h>
#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
#define __Pyx_CYFUNCTION_CCLASS 0x04
#define __Pyx_CyFunction_GetClosure(f) \
(((__pyx_CyFunctionObject *) (f))->func_closure)
#define __Pyx_CyFunction_GetClassObj(f) \
(((__pyx_CyFunctionObject *) (f))->func_classobj)
#define __Pyx_CyFunction_Defaults(type, f) \
((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
#define __Pyx_CyFunction_SetDefaultsGetter(f, g) \
((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
/* Object layout for Cython-compiled functions (CyFunction): extends
 * PyCFunctionObject with the introspection attributes, closure, and
 * default-argument storage that builtin functions lack. */
typedef struct {
    PyCFunctionObject func;        /* base builtin-function object (must be first) */
#if PY_VERSION_HEX < 0x030500A0
    PyObject *func_weakreflist;    /* weakref list; >=3.5 the base struct has one */
#endif
    PyObject *func_dict;           /* __dict__ */
    PyObject *func_name;           /* __name__ */
    PyObject *func_qualname;       /* __qualname__ */
    PyObject *func_doc;            /* __doc__ */
    PyObject *func_globals;        /* __globals__ */
    PyObject *func_code;           /* __code__ */
    PyObject *func_closure;        /* closure cells captured by the function */
    PyObject *func_classobj;       /* defining class -- presumably for classmethod binding; confirm */
    void *defaults;                /* C struct holding default argument values */
    int defaults_pyobjects;        /* number of PyObject* slots inside `defaults` */
    int flags;                     /* __Pyx_CYFUNCTION_* bits (static/class/cclass) */
    PyObject *defaults_tuple;      /* __defaults__, built lazily via defaults_getter */
    PyObject *defaults_kwdict;     /* __kwdefaults__ */
    PyObject *(*defaults_getter)(PyObject *);  /* builds the defaults tuple on demand */
    PyObject *func_annotations;    /* __annotations__ */
} __pyx_CyFunctionObject;
static PyTypeObject *__pyx_CyFunctionType = 0;
#define __Pyx_CyFunction_NewEx(ml, flags, qualname, self, module, globals, code) \
__Pyx_CyFunction_New(__pyx_CyFunctionType, ml, flags, qualname, self, module, globals, code)
static PyObject *__Pyx_CyFunction_New(PyTypeObject *, PyMethodDef *ml,
int flags, PyObject* qualname,
PyObject *self,
PyObject *module, PyObject *globals,
PyObject* code);
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m,
size_t size,
int pyobjects);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m,
PyObject *tuple);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m,
PyObject *dict);
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m,
PyObject *dict);
static int __Pyx_CyFunction_init(void);
/* Fused (multi-signature) function object: a CyFunction plus the table
 * of type-specialized implementations and optional binding state. */
typedef struct {
    __pyx_CyFunctionObject func;  /* base CyFunction (must be first) */
    PyObject *__signatures__;     /* presumably maps signature strings to specializations; confirm */
    PyObject *type;               /* class the function is bound through, if any -- TODO confirm */
    PyObject *self;               /* bound instance, or NULL when unbound */
} __pyx_FusedFunctionObject;
#define __pyx_FusedFunction_NewEx(ml, flags, qualname, self, module, globals, code) \
__pyx_FusedFunction_New(__pyx_FusedFunctionType, ml, flags, qualname, self, module, globals, code)
static PyObject *__pyx_FusedFunction_New(PyTypeObject *type,
PyMethodDef *ml, int flags,
PyObject *qualname, PyObject *self,
PyObject *module, PyObject *globals,
PyObject *code);
static int __pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self);
static PyTypeObject *__pyx_FusedFunctionType = NULL;
static int __pyx_FusedFunction_init(void);
#define __Pyx_FusedFunction_USED
/* One traceback-cache entry: pairs a generated-C line number with the
 * PyCodeObject fabricated for it (used by __Pyx_AddTraceback). */
typedef struct {
    int code_line;              /* C source line the code object was created for */
    PyCodeObject* code_object;  /* cached code object -- presumably owned; confirm in insert path */
} __Pyx_CodeObjectCacheEntry;
/* Growable array of cache entries, kept sorted by code_line and searched
 * with __pyx_bisect_code_objects. */
struct __Pyx_CodeObjectCache {
    int count;      /* entries currently in use */
    int max_count;  /* allocated capacity of `entries` */
    __Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(PyObject *);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *);
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character);
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eqf(a, b) ((a)==(b))
#define __Pyx_c_sumf(a, b) ((a)+(b))
#define __Pyx_c_difff(a, b) ((a)-(b))
#define __Pyx_c_prodf(a, b) ((a)*(b))
#define __Pyx_c_quotf(a, b) ((a)/(b))
#define __Pyx_c_negf(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zerof(z) ((z)==(float)0)
#define __Pyx_c_conjf(z) (::std::conj(z))
#if 1
#define __Pyx_c_absf(z) (::std::abs(z))
#define __Pyx_c_powf(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zerof(z) ((z)==0)
#define __Pyx_c_conjf(z) (conjf(z))
#if 1
#define __Pyx_c_absf(z) (cabsf(z))
#define __Pyx_c_powf(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq(a, b) ((a)==(b))
#define __Pyx_c_sum(a, b) ((a)+(b))
#define __Pyx_c_diff(a, b) ((a)-(b))
#define __Pyx_c_prod(a, b) ((a)*(b))
#define __Pyx_c_quot(a, b) ((a)/(b))
#define __Pyx_c_neg(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero(z) ((z)==(double)0)
#define __Pyx_c_conj(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs(z) (::std::abs(z))
#define __Pyx_c_pow(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero(z) ((z)==0)
#define __Pyx_c_conj(z) (conj(z))
#if 1
#define __Pyx_c_abs(z) (cabs(z))
#define __Pyx_c_pow(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs,
char order, int ndim);
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
static int __Pyx_check_binary_version(void);
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
static PyObject *__Pyx_ImportModule(const char *name);
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from '_interpolate3d' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
/* --- Cython-generated module-level declarations for '_interpolate3d'. ---
 * NOTE(review): this file is auto-generated by Cython from
 * _interpolate3d.pyx; do not hand-edit — regenerate from the .pyx instead. */

/* Forward declarations of memoryview slice helpers (bodies elsewhere in file). */
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
/* Buffer-protocol type descriptors for the dtypes the fused function accepts
 * ('R' = floating-point kind in Cython's TypeInfo encoding). */
static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t = { "float64_t", NULL, sizeof(__pyx_t_5numpy_float64_t), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "_interpolate3d"
/* Nonzero when the module is executed as __main__ (set during module init). */
int __pyx_module_is_main__interpolate3d = 0;
/* Implementation of '_interpolate3d' */
/* Cached references to builtins looked up once at module init. */
static PyObject *__pyx_builtin_ImportError;
static PyObject *__pyx_builtin_AttributeError;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_ord;
static PyObject *__pyx_builtin_zip;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_RuntimeError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_And_IndexError;
/* Prototypes: fused-type dispatcher plus the two specializations of
 * interpolate3d (one per floating dtype), and numpy/memoryview support. */
static PyObject *__pyx_pf_14_interpolate3d_interpolate3d(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults); /* proto */
static PyObject *__pyx_pf_14_interpolate3d_2interpolate3d(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED int __pyx_v_n, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, PyArrayObject *__pyx_v_z, int __pyx_v_n_x_vals, PyArrayObject *__pyx_v_x_vals, int __pyx_v_n_y_vals, PyArrayObject *__pyx_v_y_vals, int __pyx_v_n_z_vals, PyArrayObject *__pyx_v_z_vals, PyArrayObject *__pyx_v_vals, PyArrayObject *__pyx_v_result_array); /* proto */
static PyObject *__pyx_pf_14_interpolate3d_4interpolate3d(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED int __pyx_v_n, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, PyArrayObject *__pyx_v_z, int __pyx_v_n_x_vals, PyArrayObject *__pyx_v_x_vals, int __pyx_v_n_y_vals, PyArrayObject *__pyx_v_y_vals, int __pyx_v_n_z_vals, PyArrayObject *__pyx_v_z_vals, PyArrayObject *__pyx_v_vals, PyArrayObject *__pyx_v_result_array); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
/* tp_new slots for the internal extension types (array, Enum, memoryview). */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
/* --- Interned C string literal table (Cython-generated). ---
 * Each __pyx_k_* byte string is turned into a cached Python str/bytes object
 * (the matching __pyx_n_s_* / __pyx_kp_* below) during module init.
 * Auto-generated: do not hand-edit. */
static char __pyx_k_[] = "()";
/* Single-character buffer-format codes used by the buffer/struct machinery. */
static char __pyx_k_B[] = "B";
static char __pyx_k_H[] = "H";
static char __pyx_k_I[] = "I";
static char __pyx_k_L[] = "L";
static char __pyx_k_O[] = "O";
static char __pyx_k_Q[] = "Q";
static char __pyx_k_b[] = "b";
static char __pyx_k_c[] = "c";
static char __pyx_k_d[] = "d";
static char __pyx_k_f[] = "f";
static char __pyx_k_g[] = "g";
static char __pyx_k_h[] = "h";
static char __pyx_k_i[] = "i";
static char __pyx_k_l[] = "l";
static char __pyx_k_n[] = "n";
static char __pyx_k_q[] = "q";
static char __pyx_k_x[] = "x";
static char __pyx_k_y[] = "y";
static char __pyx_k_z[] = "z";
static char __pyx_k_Zd[] = "Zd";
static char __pyx_k_Zf[] = "Zf";
static char __pyx_k_Zg[] = "Zg";
static char __pyx_k__3[] = "|";
/* Identifier names referenced from the .pyx (locals, attributes, imports). */
static char __pyx_k_id[] = "id";
static char __pyx_k_np[] = "np";
static char __pyx_k_v0[] = "v0";
static char __pyx_k_v1[] = "v1";
static char __pyx_k_xi[] = "xi";
static char __pyx_k_yi[] = "yi";
static char __pyx_k_zi[] = "zi";
static char __pyx_k_obj[] = "obj";
static char __pyx_k_ord[] = "ord";
static char __pyx_k_sys[] = "sys";
static char __pyx_k_v00[] = "v00";
static char __pyx_k_v01[] = "v01";
static char __pyx_k_v10[] = "v10";
static char __pyx_k_v11[] = "v11";
static char __pyx_k_zip[] = "zip";
static char __pyx_k_args[] = "args";
static char __pyx_k_base[] = "base";
static char __pyx_k_kind[] = "kind";
static char __pyx_k_main[] = "__main__";
static char __pyx_k_mode[] = "mode";
static char __pyx_k_name[] = "name";
static char __pyx_k_ndim[] = "ndim";
static char __pyx_k_pack[] = "pack";
static char __pyx_k_size[] = "size";
static char __pyx_k_step[] = "step";
static char __pyx_k_stop[] = "stop";
static char __pyx_k_test[] = "__test__";
static char __pyx_k_v000[] = "v000";
static char __pyx_k_v001[] = "v001";
static char __pyx_k_v010[] = "v010";
static char __pyx_k_v011[] = "v011";
static char __pyx_k_v100[] = "v100";
static char __pyx_k_v101[] = "v101";
static char __pyx_k_v110[] = "v110";
static char __pyx_k_v111[] = "v111";
static char __pyx_k_vals[] = "vals";
static char __pyx_k_class[] = "__class__";
static char __pyx_k_dtype[] = "dtype";
static char __pyx_k_error[] = "error";
static char __pyx_k_flags[] = "flags";
static char __pyx_k_float[] = "float";
static char __pyx_k_numpy[] = "numpy";
static char __pyx_k_range[] = "range";
static char __pyx_k_shape[] = "shape";
static char __pyx_k_split[] = "split";
static char __pyx_k_start[] = "start";
static char __pyx_k_strip[] = "strip";
static char __pyx_k_x_fac[] = "x_fac";
static char __pyx_k_y_fac[] = "y_fac";
static char __pyx_k_z_fac[] = "z_fac";
static char __pyx_k_double[] = "double";
static char __pyx_k_format[] = "format";
static char __pyx_k_import[] = "__import__";
static char __pyx_k_kwargs[] = "kwargs";
static char __pyx_k_name_2[] = "__name__";
static char __pyx_k_struct[] = "struct";
static char __pyx_k_unpack[] = "unpack";
static char __pyx_k_x_vals[] = "x_vals";
static char __pyx_k_y_vals[] = "y_vals";
static char __pyx_k_z_vals[] = "z_vals";
static char __pyx_k_fortran[] = "fortran";
static char __pyx_k_memview[] = "memview";
static char __pyx_k_mid_ind[] = "mid_ind";
static char __pyx_k_ndarray[] = "ndarray";
static char __pyx_k_Ellipsis[] = "Ellipsis";
static char __pyx_k_defaults[] = "defaults";
static char __pyx_k_itemsize[] = "itemsize";
static char __pyx_k_n_x_vals[] = "n_x_vals";
static char __pyx_k_n_y_vals[] = "n_y_vals";
static char __pyx_k_n_z_vals[] = "n_z_vals";
static char __pyx_k_TypeError[] = "TypeError";
static char __pyx_k_enumerate[] = "enumerate";
static char __pyx_k_x_bot_ind[] = "x_bot_ind";
static char __pyx_k_x_top_ind[] = "x_top_ind";
static char __pyx_k_y_bot_ind[] = "y_bot_ind";
static char __pyx_k_y_top_ind[] = "y_top_ind";
static char __pyx_k_z_bot_ind[] = "z_bot_ind";
static char __pyx_k_z_top_ind[] = "z_top_ind";
static char __pyx_k_IndexError[] = "IndexError";
static char __pyx_k_ValueError[] = "ValueError";
static char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static char __pyx_k_signatures[] = "signatures";
static char __pyx_k_ImportError[] = "ImportError";
static char __pyx_k_MemoryError[] = "MemoryError";
static char __pyx_k_RuntimeError[] = "RuntimeError";
static char __pyx_k_result_array[] = "result_array";
static char __pyx_k_interpolate3d[] = "interpolate3d";
static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static char __pyx_k_AttributeError[] = "AttributeError";
static char __pyx_k_allocate_buffer[] = "allocate_buffer";
static char __pyx_k_dtype_is_object[] = "dtype_is_object";
static char __pyx_k_interpolate3d_2[] = "_interpolate3d";
/* Repr tags and error-message format strings used by memoryview/buffer code. */
static char __pyx_k_strided_and_direct[] = "<strided and direct>";
static char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static char __pyx_k_getbuffer_obj_view_flags[] = "getbuffer(obj, view, flags)";
static char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct";
static char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)";
static char __pyx_k_No_matching_signature_found[] = "No matching signature found";
static char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)";
static char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static char __pyx_k_Expected_at_least_d_arguments[] = "Expected at least %d arguments";
static char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static char __pyx_k_mnt_pact_ds381_seren3_src_analy[] = "/mnt/pact/ds381/seren3/src/analysis/_interpolate3d.pyx";
static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced";
static char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions";
static char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static char __pyx_k_Function_call_with_ambiguous_arg[] = "Function call with ambiguous argument types";
static char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
/* --- Interned Python object cache (Cython-generated). ---
 * One PyObject* per __pyx_k_* literal above, plus small-int, tuple, slice and
 * code-object caches; all are created once in the module's InitStrings/
 * InitCachedConstants phase. Naming: __pyx_n_s_* = identifier (str),
 * __pyx_kp_s_* / __pyx_kp_u_* = plain str/unicode literal, __pyx_n_b_* = bytes.
 * Auto-generated: do not hand-edit. */
static PyObject *__pyx_kp_s_;
static PyObject *__pyx_n_s_AttributeError;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_s_Expected_at_least_d_arguments;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_kp_s_Function_call_with_ambiguous_arg;
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_kp_s_No_matching_signature_found;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_kp_s__3;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_args;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_defaults;
static PyObject *__pyx_n_s_double;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_float;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_interpolate3d;
static PyObject *__pyx_n_s_interpolate3d_2;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_kind;
static PyObject *__pyx_n_s_kwargs;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mid_ind;
static PyObject *__pyx_kp_s_mnt_pact_ds381_seren3_src_analy;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_n;
static PyObject *__pyx_n_s_n_x_vals;
static PyObject *__pyx_n_s_n_y_vals;
static PyObject *__pyx_n_s_n_z_vals;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndarray;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_ord;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_result_array;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_signatures;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_split;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_n_s_strip;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_sys;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_v0;
static PyObject *__pyx_n_s_v00;
static PyObject *__pyx_n_s_v000;
static PyObject *__pyx_n_s_v001;
static PyObject *__pyx_n_s_v01;
static PyObject *__pyx_n_s_v010;
static PyObject *__pyx_n_s_v011;
static PyObject *__pyx_n_s_v1;
static PyObject *__pyx_n_s_v10;
static PyObject *__pyx_n_s_v100;
static PyObject *__pyx_n_s_v101;
static PyObject *__pyx_n_s_v11;
static PyObject *__pyx_n_s_v110;
static PyObject *__pyx_n_s_v111;
static PyObject *__pyx_n_s_vals;
static PyObject *__pyx_n_s_x;
static PyObject *__pyx_n_s_x_bot_ind;
static PyObject *__pyx_n_s_x_fac;
static PyObject *__pyx_n_s_x_top_ind;
static PyObject *__pyx_n_s_x_vals;
static PyObject *__pyx_n_s_xi;
static PyObject *__pyx_n_s_y;
static PyObject *__pyx_n_s_y_bot_ind;
static PyObject *__pyx_n_s_y_fac;
static PyObject *__pyx_n_s_y_top_ind;
static PyObject *__pyx_n_s_y_vals;
static PyObject *__pyx_n_s_yi;
static PyObject *__pyx_n_s_z;
static PyObject *__pyx_n_s_z_bot_ind;
static PyObject *__pyx_n_s_z_fac;
static PyObject *__pyx_n_s_z_top_ind;
static PyObject *__pyx_n_s_z_vals;
static PyObject *__pyx_n_s_zi;
static PyObject *__pyx_n_s_zip;
/* Cached small integers. */
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_neg_1;
/* Cached constant tuples/slices and the code object for interpolate3d. */
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__21;
static PyObject *__pyx_slice__22;
static PyObject *__pyx_slice__23;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_tuple__28;
static PyObject *__pyx_tuple__29;
static PyObject *__pyx_tuple__30;
static PyObject *__pyx_tuple__31;
static PyObject *__pyx_codeobj__26;
/* "_interpolate3d.pyx":14
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def interpolate3d(int n, # <<<<<<<<<<<<<<
* np.ndarray[floating,ndim=1] x,
* np.ndarray[floating,ndim=1] y,
*/
/* Python wrapper */
/* Python-level entry point for the fused function `interpolate3d`.
 * This Cython-generated wrapper unpacks exactly four arguments
 * (signatures, args, kwargs, defaults) for the fused-cpdef dispatcher
 * __pyx_pf_14_interpolate3d_interpolate3d, which selects the concrete
 * specialization (float vs float64) from the runtime argument types.
 * Auto-generated: do not hand-edit; the switch fallthroughs below are
 * intentional (each case also handles all later positional slots). */
static PyObject *__pyx_pw_14_interpolate3d_1interpolate3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_14_interpolate3d_1interpolate3d = {"interpolate3d", (PyCFunction)__pyx_pw_14_interpolate3d_1interpolate3d, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_14_interpolate3d_1interpolate3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_signatures = 0;
PyObject *__pyx_v_args = 0;
PyObject *__pyx_v_kwargs = 0;
CYTHON_UNUSED PyObject *__pyx_v_defaults = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_fused_cpdef (wrapper)", 0);
{
/* Keyword names accepted, in positional order; NULL-terminated. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_signatures,&__pyx_n_s_args,&__pyx_n_s_kwargs,&__pyx_n_s_defaults,0};
PyObject* values[4] = {0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Copy any positional arguments; deliberate fallthrough fills all
 * slots below the given count. Borrowed references (no INCREF). */
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill remaining slots from keywords; again deliberate fallthrough
 * starting at the first slot not covered positionally. All four
 * arguments are required, so a missing one raises TypeError. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_signatures)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_args)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_kwargs)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_defaults)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
/* Any leftover keywords are unexpected/duplicate -> TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_fused_cpdef") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: no keywords, exactly four positionals. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
}
__pyx_v_signatures = values[0];
__pyx_v_args = values[1];
__pyx_v_kwargs = values[2];
__pyx_v_defaults = values[3];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
/* Wrong positional count -> TypeError with expected/received counts. */
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("_interpolate3d.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Delegate to the fused-type dispatcher with the unpacked arguments. */
__pyx_r = __pyx_pf_14_interpolate3d_interpolate3d(__pyx_self, __pyx_v_signatures, __pyx_v_args, __pyx_v_kwargs, __pyx_v_defaults);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-GENERATED code — this file is produced from the
 * `_interpolate3d.pyx` source.  Do not hand-edit; regenerate with Cython.
 *
 * Fused-type dispatcher for `interpolate3d`.  Given the registry of
 * concrete specializations in `signatures` (a dict mapping signature
 * strings to function objects), it inspects the dispatch argument
 * (positional args[1] or kwargs['x']) and resolves which specialization
 * ("float" vs "double") to return:
 *   1. best-effort `import numpy` to get `numpy.ndarray` (ImportError /
 *      AttributeError / TypeError are swallowed and ndarray stays None);
 *   2. infer the argument's dtype itemsize/kind from `arg.dtype`, or from
 *      `arg.base.dtype` if `arg` is a memoryview, or by probing memoryview
 *      slice conversion;
 *   3. match the inferred dtype name against each registered signature and
 *      return the single matching function (TypeError on 0 or >1 matches). */
static PyObject *__pyx_pf_14_interpolate3d_interpolate3d(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults) {
PyObject *__pyx_v_dest_sig = NULL;
PyObject *__pyx_v_ndarray = 0;
PyObject *__pyx_v_numpy = NULL;
__Pyx_memviewslice __pyx_v_memslice;
Py_ssize_t __pyx_v_itemsize;
CYTHON_UNUSED int __pyx_v_dtype_signed;
char __pyx_v_kind;
PyObject *__pyx_v_arg = NULL;
PyObject *__pyx_v_dtype = NULL;
PyObject *__pyx_v_arg_base = NULL;
PyObject *__pyx_v_candidates = NULL;
PyObject *__pyx_v_sig = NULL;
int __pyx_v_match_found;
PyObject *__pyx_v_src_type = NULL;
PyObject *__pyx_v_dst_type = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
char __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_t_13;
Py_ssize_t __pyx_t_14;
PyObject *(*__pyx_t_15)(PyObject *);
PyObject *__pyx_t_16 = NULL;
PyObject *__pyx_t_17 = NULL;
PyObject *__pyx_t_18 = NULL;
PyObject *(*__pyx_t_19)(PyObject *);
int __pyx_t_20;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("interpolate3d", 0);
__Pyx_INCREF(__pyx_v_kwargs);
/* dest_sig = [None] — one slot because this fused function has a single
 * fused parameter; the slot is filled with "float"/"double"/None below. */
__pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(Py_None);
PyList_SET_ITEM(__pyx_t_1, 0, Py_None);
__Pyx_GIVEREF(Py_None);
__pyx_v_dest_sig = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* Normalize kwargs: None -> {} so dict lookups below are safe. */
__pyx_t_2 = (__pyx_v_kwargs == Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF_SET(__pyx_v_kwargs, __pyx_t_1);
__pyx_t_1 = 0;
goto __pyx_L3;
}
__pyx_L3:;
/* try: import numpy; ndarray = numpy.ndarray
 * except (ImportError, AttributeError, TypeError): ndarray = None */
{
__Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_6);
/*try:*/ {
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_numpy = __pyx_t_1;
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_numpy, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(PyType_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "type", Py_TYPE(__pyx_t_1)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__pyx_v_ndarray = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
}
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
goto __pyx_L11_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_7 = PyErr_ExceptionMatches(__pyx_builtin_ImportError) || PyErr_ExceptionMatches(__pyx_builtin_AttributeError) || PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_7) {
__Pyx_AddTraceback("_interpolate3d.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_8, &__pyx_t_9) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_9);
__Pyx_INCREF(Py_None);
__Pyx_XDECREF_SET(__pyx_v_ndarray, ((PyObject*)Py_None));
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L5_exception_handled;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6);
goto __pyx_L1_error;
__pyx_L5_exception_handled:;
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6);
__pyx_L11_try_end:;
}
/* Select the dispatch argument: args[1] if present, else kwargs['x'];
 * otherwise raise "Expected at least %d arguments".  (args[0] is the
 * non-fused leading parameter `n`, so the fused parameter is args[1].) */
__pyx_v_itemsize = -1;
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_10 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = ((1 < __pyx_t_10) != 0);
if (__pyx_t_3) {
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_9 = PyTuple_GET_ITEM(((PyObject*)__pyx_v_args), 1);
__Pyx_INCREF(__pyx_t_9);
__pyx_v_arg = __pyx_t_9;
__pyx_t_9 = 0;
goto __pyx_L14;
}
if (unlikely(__pyx_v_kwargs == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_3 = (__Pyx_PyDict_Contains(__pyx_n_s_x, ((PyObject*)__pyx_v_kwargs), Py_EQ)); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
if (unlikely(__pyx_v_kwargs == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_9 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_kwargs), __pyx_n_s_x); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_9);
__pyx_v_arg = __pyx_t_9;
__pyx_t_9 = 0;
goto __pyx_L14;
}
/*else*/ {
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_10 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_9 = PyInt_FromSsize_t(__pyx_t_10); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_8 = __Pyx_PyString_Format(__pyx_kp_s_Expected_at_least_d_arguments, __pyx_t_9); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8);
__Pyx_GIVEREF(__pyx_t_8);
__pyx_t_8 = 0;
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L14:;
/* Generated single-iteration "loop": the `if (0)` / `while (1)` /
 * `if (!1) break;` scaffolding is intentional Cython output that lets the
 * dtype-resolution code `break` out at the first successful match. */
if (0) {
goto __pyx_L15;
}
/*else*/ {
while (1) {
if (!1) break;
/* Resolve `dtype`: arg.dtype for ndarrays, arg.base.dtype for
 * memoryviews over ndarrays, else None. */
__pyx_t_2 = (__pyx_v_ndarray != ((PyObject*)Py_None));
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
__pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg, __pyx_v_ndarray);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_dtype); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__pyx_v_dtype = __pyx_t_8;
__pyx_t_8 = 0;
goto __pyx_L19;
}
__pyx_t_2 = (__pyx_memoryview_check(__pyx_v_arg) != 0);
if (__pyx_t_2) {
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_base); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__pyx_v_arg_base = __pyx_t_8;
__pyx_t_8 = 0;
__pyx_t_2 = __Pyx_TypeCheck(__pyx_v_arg_base, __pyx_v_ndarray);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg_base, __pyx_n_s_dtype); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__pyx_v_dtype = __pyx_t_8;
__pyx_t_8 = 0;
goto __pyx_L20;
}
/*else*/ {
__Pyx_INCREF(Py_None);
__pyx_v_dtype = Py_None;
}
__pyx_L20:;
goto __pyx_L19;
}
/*else*/ {
__Pyx_INCREF(Py_None);
__pyx_v_dtype = Py_None;
}
__pyx_L19:;
/* If a dtype was found: read itemsize and kind, then pick the fused
 * specialization whose C size matches (float vs double, 1-D only). */
__pyx_v_itemsize = -1;
__pyx_t_3 = (__pyx_v_dtype != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_itemsize); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_v_itemsize = __pyx_t_10;
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_kind); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8);
__Pyx_GIVEREF(__pyx_t_8);
__pyx_t_8 = 0;
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ord, __pyx_t_9, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_11 = __Pyx_PyInt_As_char(__pyx_t_8); if (unlikely((__pyx_t_11 == (char)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_v_kind = __pyx_t_11;
__pyx_v_dtype_signed = (__pyx_v_kind == 'i');
switch (__pyx_v_kind) {
case 'i':
case 'u':
break;
case 'f':
__pyx_t_3 = (((sizeof(float)) == __pyx_v_itemsize) != 0);
if (__pyx_t_3) {
} else {
__pyx_t_2 = __pyx_t_3;
goto __pyx_L23_bool_binop_done;
}
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_3 = ((((Py_ssize_t)__pyx_t_10) == 1) != 0);
__pyx_t_2 = __pyx_t_3;
__pyx_L23_bool_binop_done:;
if (__pyx_t_2) {
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L17_break;
}
__pyx_t_3 = (((sizeof(double)) == __pyx_v_itemsize) != 0);
if (__pyx_t_3) {
} else {
__pyx_t_2 = __pyx_t_3;
goto __pyx_L26_bool_binop_done;
}
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_3 = ((((Py_ssize_t)__pyx_t_10) == 1) != 0);
__pyx_t_2 = __pyx_t_3;
__pyx_L26_bool_binop_done:;
if (__pyx_t_2) {
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L17_break;
}
break;
case 'c':
break;
case 'O':
break;
default: break;
}
goto __pyx_L21;
}
__pyx_L21:;
goto __pyx_L18;
}
__pyx_L18:;
/* Fallback: no dtype (or unmatched kind) — probe whether `arg` can be
 * coerced to a float, then double, 1-D memoryview slice. */
__pyx_t_3 = ((__pyx_v_itemsize == -1) != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_2 = __pyx_t_3;
goto __pyx_L29_bool_binop_done;
}
__pyx_t_3 = ((__pyx_v_itemsize == (sizeof(float))) != 0);
__pyx_t_2 = __pyx_t_3;
__pyx_L29_bool_binop_done:;
if (__pyx_t_2) {
__pyx_v_memslice = __Pyx_PyObject_to_MemoryviewSlice_ds_float(__pyx_v_arg);
__pyx_t_2 = (__pyx_v_memslice.memview != 0);
if (__pyx_t_2) {
__PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1);
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L17_break;
}
/*else*/ {
PyErr_Clear();
}
goto __pyx_L28;
}
__pyx_L28:;
__pyx_t_3 = ((__pyx_v_itemsize == -1) != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_2 = __pyx_t_3;
goto __pyx_L33_bool_binop_done;
}
__pyx_t_3 = ((__pyx_v_itemsize == (sizeof(double))) != 0);
__pyx_t_2 = __pyx_t_3;
__pyx_L33_bool_binop_done:;
if (__pyx_t_2) {
__pyx_v_memslice = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_v_arg);
__pyx_t_2 = (__pyx_v_memslice.memview != 0);
if (__pyx_t_2) {
__PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1);
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L17_break;
}
/*else*/ {
PyErr_Clear();
}
goto __pyx_L32;
}
__pyx_L32:;
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, Py_None, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L17_break;
}
__pyx_L17_break:;
}
__pyx_L15:;
/* candidates = [sig for sig in signatures
 *               if all(dst is None or src == dst
 *                      for src, dst in zip(sig.strip('()').split('|'), dest_sig))] */
__pyx_t_8 = PyList_New(0); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__pyx_v_candidates = ((PyObject*)__pyx_t_8);
__pyx_t_8 = 0;
__pyx_t_10 = 0;
if (unlikely(__pyx_v_signatures == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_9 = __Pyx_dict_iterator(((PyObject*)__pyx_v_signatures), 1, ((PyObject *)NULL), (&__pyx_t_12), (&__pyx_t_7)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_8);
__pyx_t_8 = __pyx_t_9;
__pyx_t_9 = 0;
while (1) {
__pyx_t_13 = __Pyx_dict_iter_next(__pyx_t_8, __pyx_t_12, &__pyx_t_10, &__pyx_t_9, NULL, NULL, __pyx_t_7);
if (unlikely(__pyx_t_13 == 0)) break;
if (unlikely(__pyx_t_13 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_XDECREF_SET(__pyx_v_sig, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_match_found = 0;
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_sig, __pyx_n_s_strip); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_split); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_dest_sig);
PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_dest_sig);
__Pyx_GIVEREF(__pyx_v_dest_sig);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_zip, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (likely(PyList_CheckExact(__pyx_t_1)) || PyTuple_CheckExact(__pyx_t_1)) {
__pyx_t_9 = __pyx_t_1; __Pyx_INCREF(__pyx_t_9); __pyx_t_14 = 0;
__pyx_t_15 = NULL;
} else {
__pyx_t_14 = -1; __pyx_t_9 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_15 = Py_TYPE(__pyx_t_9)->tp_iternext; if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
for (;;) {
if (likely(!__pyx_t_15)) {
if (likely(PyList_CheckExact(__pyx_t_9))) {
if (__pyx_t_14 >= PyList_GET_SIZE(__pyx_t_9)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_1 = PyList_GET_ITEM(__pyx_t_9, __pyx_t_14); __Pyx_INCREF(__pyx_t_1); __pyx_t_14++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_1 = PySequence_ITEM(__pyx_t_9, __pyx_t_14); __pyx_t_14++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
if (__pyx_t_14 >= PyTuple_GET_SIZE(__pyx_t_9)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_9, __pyx_t_14); __Pyx_INCREF(__pyx_t_1); __pyx_t_14++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_1 = PySequence_ITEM(__pyx_t_9, __pyx_t_14); __pyx_t_14++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
}
} else {
__pyx_t_1 = __pyx_t_15(__pyx_t_9);
if (unlikely(!__pyx_t_1)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_1);
}
/* Unpack (src_type, dst_type) from the zip item. */
if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) {
PyObject* sequence = __pyx_t_1;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_16 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_17 = PyTuple_GET_ITEM(sequence, 1);
} else {
__pyx_t_16 = PyList_GET_ITEM(sequence, 0);
__pyx_t_17 = PyList_GET_ITEM(sequence, 1);
}
__Pyx_INCREF(__pyx_t_16);
__Pyx_INCREF(__pyx_t_17);
#else
__pyx_t_16 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_17 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_17);
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else {
Py_ssize_t index = -1;
__pyx_t_18 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_18);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_19 = Py_TYPE(__pyx_t_18)->tp_iternext;
index = 0; __pyx_t_16 = __pyx_t_19(__pyx_t_18); if (unlikely(!__pyx_t_16)) goto __pyx_L40_unpacking_failed;
__Pyx_GOTREF(__pyx_t_16);
index = 1; __pyx_t_17 = __pyx_t_19(__pyx_t_18); if (unlikely(!__pyx_t_17)) goto __pyx_L40_unpacking_failed;
__Pyx_GOTREF(__pyx_t_17);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_19(__pyx_t_18), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_19 = NULL;
__Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0;
goto __pyx_L41_unpacking_done;
__pyx_L40_unpacking_failed:;
__Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0;
__pyx_t_19 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_L41_unpacking_done:;
}
__Pyx_XDECREF_SET(__pyx_v_src_type, __pyx_t_16);
__pyx_t_16 = 0;
__Pyx_XDECREF_SET(__pyx_v_dst_type, __pyx_t_17);
__pyx_t_17 = 0;
__pyx_t_2 = (__pyx_v_dst_type != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
__pyx_t_1 = PyObject_RichCompare(__pyx_v_src_type, __pyx_v_dst_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__pyx_t_3) {
__pyx_v_match_found = 1;
goto __pyx_L43;
}
/*else*/ {
__pyx_v_match_found = 0;
goto __pyx_L39_break;
}
__pyx_L43:;
goto __pyx_L42;
}
__pyx_L42:;
}
__pyx_L39_break:;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_3 = (__pyx_v_match_found != 0);
if (__pyx_t_3) {
__pyx_t_20 = __Pyx_PyList_Append(__pyx_v_candidates, __pyx_v_sig); if (unlikely(__pyx_t_20 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L44;
}
__pyx_L44:;
}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
/* Exactly one candidate required: TypeError on no match or on ambiguity
 * (__pyx_tuple__5 / __pyx_tuple__6 hold the generated messages). */
__pyx_t_3 = (__pyx_v_candidates != Py_None) && (PyList_GET_SIZE(__pyx_v_candidates) != 0);
__pyx_t_2 = ((!__pyx_t_3) != 0);
if (__pyx_t_2) {
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_12 = PyList_GET_SIZE(__pyx_v_candidates); if (unlikely(__pyx_t_12 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_2 = ((__pyx_t_12 > 1) != 0);
if (__pyx_t_2) {
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/*else*/ {
/* return signatures[candidates[0]] — the concrete specialization. */
__Pyx_XDECREF(__pyx_r);
if (unlikely(__pyx_v_signatures == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_8 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_signatures), PyList_GET_ITEM(__pyx_v_candidates, 0)); if (unlikely(__pyx_t_8 == NULL)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_8);
__pyx_r = __pyx_t_8;
__pyx_t_8 = 0;
goto __pyx_L0;
}
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_16);
__Pyx_XDECREF(__pyx_t_17);
__Pyx_XDECREF(__pyx_t_18);
__Pyx_AddTraceback("_interpolate3d.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_dest_sig);
__Pyx_XDECREF(__pyx_v_ndarray);
__Pyx_XDECREF(__pyx_v_numpy);
__Pyx_XDECREF(__pyx_v_arg);
__Pyx_XDECREF(__pyx_v_dtype);
__Pyx_XDECREF(__pyx_v_arg_base);
__Pyx_XDECREF(__pyx_v_candidates);
__Pyx_XDECREF(__pyx_v_sig);
__Pyx_XDECREF(__pyx_v_src_type);
__Pyx_XDECREF(__pyx_v_dst_type);
__Pyx_XDECREF(__pyx_v_kwargs);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/* Prototype and method-table entry for the `__pyx_fuse_0` specialization of
 * `interpolate3d` (the first fused variant — presumably the `float` one,
 * matching the dispatch order above; confirm against the .pyx source).
 * Cython-generated: do not hand-edit. */
static PyObject *__pyx_fuse_0__pyx_pw_14_interpolate3d_3interpolate3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_fuse_0__pyx_mdef_14_interpolate3d_3interpolate3d = {"__pyx_fuse_0interpolate3d", (PyCFunction)__pyx_fuse_0__pyx_pw_14_interpolate3d_3interpolate3d, METH_VARARGS|METH_KEYWORDS, 0};
/* NOTE(review): Cython-GENERATED argument-unpacking wrapper for the
 * `__pyx_fuse_0` specialization of `interpolate3d`.  Do not hand-edit.
 *
 * Parses exactly 12 required arguments (positional and/or keyword):
 *   n, x, y, z, n_x_vals, x_vals, n_y_vals, y_vals, n_z_vals, z_vals,
 *   vals, result_array
 * The four `n*` arguments are converted to C int; the eight array
 * arguments are type-checked as numpy.ndarray (None allowed — the `1`
 * flag passed to __Pyx_ArgTypeTest permits None) and forwarded to the
 * implementation function `__pyx_pf_14_interpolate3d_2interpolate3d`. */
static PyObject *__pyx_fuse_0__pyx_pw_14_interpolate3d_3interpolate3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
CYTHON_UNUSED int __pyx_v_n;
PyArrayObject *__pyx_v_x = 0;
PyArrayObject *__pyx_v_y = 0;
PyArrayObject *__pyx_v_z = 0;
int __pyx_v_n_x_vals;
PyArrayObject *__pyx_v_x_vals = 0;
int __pyx_v_n_y_vals;
PyArrayObject *__pyx_v_y_vals = 0;
int __pyx_v_n_z_vals;
PyArrayObject *__pyx_v_z_vals = 0;
PyArrayObject *__pyx_v_vals = 0;
PyArrayObject *__pyx_v_result_array = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("interpolate3d (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_n,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z,&__pyx_n_s_n_x_vals,&__pyx_n_s_x_vals,&__pyx_n_s_n_y_vals,&__pyx_n_s_y_vals,&__pyx_n_s_n_z_vals,&__pyx_n_s_z_vals,&__pyx_n_s_vals,&__pyx_n_s_result_array,0};
PyObject* values[12] = {0,0,0,0,0,0,0,0,0,0,0,0};
/* Keyword path: collect positionals first (deliberate switch
 * fallthrough, generated), then fill remaining slots from kwargs. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fallthrough switch: starting at the first slot not covered by a
 * positional, require each remaining argument as a keyword. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 4:
if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n_x_vals)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 5:
if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x_vals)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 6:
if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n_y_vals)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 7:
if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y_vals)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 8:
if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n_z_vals)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 9:
if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z_vals)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 10:
if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_vals)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 11:
if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_result_array)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
/* Any keyword left over is unexpected or a duplicate. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "interpolate3d") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 12) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: all 12 arguments positional, no keywords. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
}
/* Convert scalar arguments to C int; array arguments are raw casts here
 * and are type-checked below after unpacking succeeds. */
__pyx_v_n = __Pyx_PyInt_As_int(values[0]); if (unlikely((__pyx_v_n == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_x = ((PyArrayObject *)values[1]);
__pyx_v_y = ((PyArrayObject *)values[2]);
__pyx_v_z = ((PyArrayObject *)values[3]);
__pyx_v_n_x_vals = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_n_x_vals == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_x_vals = ((PyArrayObject *)values[5]);
__pyx_v_n_y_vals = __Pyx_PyInt_As_int(values[6]); if (unlikely((__pyx_v_n_y_vals == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_y_vals = ((PyArrayObject *)values[7]);
__pyx_v_n_z_vals = __Pyx_PyInt_As_int(values[8]); if (unlikely((__pyx_v_n_z_vals == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_z_vals = ((PyArrayObject *)values[9]);
__pyx_v_vals = ((PyArrayObject *)values[10]);
__pyx_v_result_array = ((PyArrayObject *)values[11]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("_interpolate3d.interpolate3d", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Runtime ndarray type checks for every array argument. */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 1, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_z), __pyx_ptype_5numpy_ndarray, 1, "z", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x_vals), __pyx_ptype_5numpy_ndarray, 1, "x_vals", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y_vals), __pyx_ptype_5numpy_ndarray, 1, "y_vals", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_z_vals), __pyx_ptype_5numpy_ndarray, 1, "z_vals", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_vals), __pyx_ptype_5numpy_ndarray, 1, "vals", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_result_array), __pyx_ptype_5numpy_ndarray, 1, "result_array", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = __pyx_pf_14_interpolate3d_2interpolate3d(__pyx_self, __pyx_v_n, __pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_n_x_vals, __pyx_v_x_vals, __pyx_v_n_y_vals, __pyx_v_y_vals, __pyx_v_n_z_vals, __pyx_v_z_vals, __pyx_v_vals, __pyx_v_result_array);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_14_interpolate3d_2interpolate3d(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED int __pyx_v_n, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, PyArrayObject *__pyx_v_z, int __pyx_v_n_x_vals, PyArrayObject *__pyx_v_x_vals, int __pyx_v_n_y_vals, PyArrayObject *__pyx_v_y_vals, int __pyx_v_n_z_vals, PyArrayObject *__pyx_v_z_vals, PyArrayObject *__pyx_v_vals, PyArrayObject *__pyx_v_result_array) {
int __pyx_v_x_top_ind;
int __pyx_v_x_bot_ind;
int __pyx_v_y_top_ind;
int __pyx_v_y_bot_ind;
int __pyx_v_z_top_ind;
int __pyx_v_z_bot_ind;
int __pyx_v_mid_ind;
double __pyx_v_x_fac;
double __pyx_v_y_fac;
double __pyx_v_z_fac;
double __pyx_v_v0;
double __pyx_v_v1;
double __pyx_v_v00;
double __pyx_v_v01;
double __pyx_v_v10;
double __pyx_v_v11;
double __pyx_v_v000;
double __pyx_v_v001;
double __pyx_v_v010;
double __pyx_v_v011;
double __pyx_v_v100;
double __pyx_v_v101;
double __pyx_v_v110;
double __pyx_v_v111;
double __pyx_v_xi;
double __pyx_v_yi;
double __pyx_v_zi;
Py_ssize_t __pyx_v_i;
__Pyx_LocalBuf_ND __pyx_pybuffernd_result_array;
__Pyx_Buffer __pyx_pybuffer_result_array;
__Pyx_LocalBuf_ND __pyx_pybuffernd_vals;
__Pyx_Buffer __pyx_pybuffer_vals;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x;
__Pyx_Buffer __pyx_pybuffer_x;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x_vals;
__Pyx_Buffer __pyx_pybuffer_x_vals;
__Pyx_LocalBuf_ND __pyx_pybuffernd_y;
__Pyx_Buffer __pyx_pybuffer_y;
__Pyx_LocalBuf_ND __pyx_pybuffernd_y_vals;
__Pyx_Buffer __pyx_pybuffer_y_vals;
__Pyx_LocalBuf_ND __pyx_pybuffernd_z;
__Pyx_Buffer __pyx_pybuffer_z;
__Pyx_LocalBuf_ND __pyx_pybuffernd_z_vals;
__Pyx_Buffer __pyx_pybuffer_z_vals;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
__pyx_t_5numpy_float64_t __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
__pyx_t_5numpy_float64_t __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
int __pyx_t_26;
int __pyx_t_27;
int __pyx_t_28;
int __pyx_t_29;
int __pyx_t_30;
int __pyx_t_31;
int __pyx_t_32;
int __pyx_t_33;
int __pyx_t_34;
int __pyx_t_35;
int __pyx_t_36;
int __pyx_t_37;
int __pyx_t_38;
int __pyx_t_39;
int __pyx_t_40;
int __pyx_t_41;
int __pyx_t_42;
int __pyx_t_43;
int __pyx_t_44;
int __pyx_t_45;
long __pyx_t_46;
int __pyx_t_47;
int __pyx_t_48;
long __pyx_t_49;
int __pyx_t_50;
int __pyx_t_51;
long __pyx_t_52;
int __pyx_t_53;
int __pyx_t_54;
long __pyx_t_55;
int __pyx_t_56;
int __pyx_t_57;
Py_ssize_t __pyx_t_58;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_fuse_0interpolate3d", 0);
__pyx_pybuffer_x.pybuffer.buf = NULL;
__pyx_pybuffer_x.refcount = 0;
__pyx_pybuffernd_x.data = NULL;
__pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
__pyx_pybuffer_y.pybuffer.buf = NULL;
__pyx_pybuffer_y.refcount = 0;
__pyx_pybuffernd_y.data = NULL;
__pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y;
__pyx_pybuffer_z.pybuffer.buf = NULL;
__pyx_pybuffer_z.refcount = 0;
__pyx_pybuffernd_z.data = NULL;
__pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z;
__pyx_pybuffer_x_vals.pybuffer.buf = NULL;
__pyx_pybuffer_x_vals.refcount = 0;
__pyx_pybuffernd_x_vals.data = NULL;
__pyx_pybuffernd_x_vals.rcbuffer = &__pyx_pybuffer_x_vals;
__pyx_pybuffer_y_vals.pybuffer.buf = NULL;
__pyx_pybuffer_y_vals.refcount = 0;
__pyx_pybuffernd_y_vals.data = NULL;
__pyx_pybuffernd_y_vals.rcbuffer = &__pyx_pybuffer_y_vals;
__pyx_pybuffer_z_vals.pybuffer.buf = NULL;
__pyx_pybuffer_z_vals.refcount = 0;
__pyx_pybuffernd_z_vals.data = NULL;
__pyx_pybuffernd_z_vals.rcbuffer = &__pyx_pybuffer_z_vals;
__pyx_pybuffer_vals.pybuffer.buf = NULL;
__pyx_pybuffer_vals.refcount = 0;
__pyx_pybuffernd_vals.data = NULL;
__pyx_pybuffernd_vals.rcbuffer = &__pyx_pybuffer_vals;
__pyx_pybuffer_result_array.pybuffer.buf = NULL;
__pyx_pybuffer_result_array.refcount = 0;
__pyx_pybuffernd_result_array.data = NULL;
__pyx_pybuffernd_result_array.rcbuffer = &__pyx_pybuffer_result_array;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_float, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_float, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_float, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x_vals.rcbuffer->pybuffer, (PyObject*)__pyx_v_x_vals, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_x_vals.diminfo[0].strides = __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x_vals.diminfo[0].shape = __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y_vals.rcbuffer->pybuffer, (PyObject*)__pyx_v_y_vals, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_y_vals.diminfo[0].strides = __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y_vals.diminfo[0].shape = __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z_vals.rcbuffer->pybuffer, (PyObject*)__pyx_v_z_vals, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_z_vals.diminfo[0].strides = __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z_vals.diminfo[0].shape = __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_vals.rcbuffer->pybuffer, (PyObject*)__pyx_v_vals, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_vals.diminfo[0].strides = __pyx_pybuffernd_vals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_vals.diminfo[0].shape = __pyx_pybuffernd_vals.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_vals.diminfo[1].strides = __pyx_pybuffernd_vals.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_vals.diminfo[1].shape = __pyx_pybuffernd_vals.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_vals.diminfo[2].strides = __pyx_pybuffernd_vals.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_vals.diminfo[2].shape = __pyx_pybuffernd_vals.rcbuffer->pybuffer.shape[2];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_result_array.rcbuffer->pybuffer, (PyObject*)__pyx_v_result_array, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_result_array.diminfo[0].strides = __pyx_pybuffernd_result_array.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_result_array.diminfo[0].shape = __pyx_pybuffernd_result_array.rcbuffer->pybuffer.shape[0];
/* "_interpolate3d.pyx":32
* cdef Py_ssize_t i
*
* for i in prange(n,nogil=True) : # <<<<<<<<<<<<<<
* if n_x_vals > 0 :
* xi = x[i]
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
__pyx_t_1 = __pyx_v_n;
if (1 == 0) abort();
{
double __pyx_parallel_temp0 = __PYX_NAN();
double __pyx_parallel_temp1 = __PYX_NAN();
int __pyx_parallel_temp2 = 0xbad0bad0;
double __pyx_parallel_temp3 = __PYX_NAN();
double __pyx_parallel_temp4 = __PYX_NAN();
double __pyx_parallel_temp5 = __PYX_NAN();
int __pyx_parallel_temp6 = 0xbad0bad0;
int __pyx_parallel_temp7 = 0xbad0bad0;
double __pyx_parallel_temp8 = __PYX_NAN();
double __pyx_parallel_temp9 = __PYX_NAN();
double __pyx_parallel_temp10 = __PYX_NAN();
double __pyx_parallel_temp11 = __PYX_NAN();
double __pyx_parallel_temp12 = __PYX_NAN();
double __pyx_parallel_temp13 = __PYX_NAN();
double __pyx_parallel_temp14 = __PYX_NAN();
int __pyx_parallel_temp15 = 0xbad0bad0;
int __pyx_parallel_temp16 = 0xbad0bad0;
double __pyx_parallel_temp17 = __PYX_NAN();
double __pyx_parallel_temp18 = __PYX_NAN();
int __pyx_parallel_temp19 = 0xbad0bad0;
Py_ssize_t __pyx_parallel_temp20 = 0xbad0bad0;
double __pyx_parallel_temp21 = __PYX_NAN();
double __pyx_parallel_temp22 = __PYX_NAN();
int __pyx_parallel_temp23 = 0xbad0bad0;
double __pyx_parallel_temp24 = __PYX_NAN();
double __pyx_parallel_temp25 = __PYX_NAN();
double __pyx_parallel_temp26 = __PYX_NAN();
double __pyx_parallel_temp27 = __PYX_NAN();
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_3 = (__pyx_t_1 - 0) / 1;
if (__pyx_t_3 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_23, __pyx_t_28, __pyx_t_38, __pyx_t_20, __pyx_t_27, __pyx_t_29, __pyx_t_44, __pyx_t_45, __pyx_t_32, __pyx_t_7, __pyx_t_21, __pyx_t_24, __pyx_t_8, __pyx_t_14, __pyx_t_19, __pyx_t_41, __pyx_t_35, __pyx_t_50, __pyx_t_13, __pyx_t_18, __pyx_t_42, __pyx_t_31, __pyx_t_57, __pyx_t_17, __pyx_t_46, __pyx_t_49, __pyx_t_53, __pyx_t_4, __pyx_t_34, __pyx_t_12, __pyx_t_37, __pyx_t_30, __pyx_t_52, __pyx_t_25, __pyx_t_11, __pyx_t_16, __pyx_t_36, __pyx_t_33, __pyx_t_56, __pyx_t_22, __pyx_t_10, __pyx_t_15, __pyx_t_39, __pyx_t_43, __pyx_t_54, __pyx_t_47, __pyx_t_51, __pyx_t_5, __pyx_t_6, __pyx_t_48, __pyx_t_9, __pyx_t_26, __pyx_t_40, __pyx_t_58, __pyx_t_55) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_v1) lastprivate(__pyx_v_yi) lastprivate(__pyx_v_z_top_ind) lastprivate(__pyx_v_v00) lastprivate(__pyx_v_v011) lastprivate(__pyx_v_v01) lastprivate(__pyx_v_x_bot_ind) lastprivate(__pyx_v_x_top_ind) lastprivate(__pyx_v_v001) lastprivate(__pyx_v_v100) lastprivate(__pyx_v_v000) lastprivate(__pyx_v_v010) lastprivate(__pyx_v_v10) lastprivate(__pyx_v_v111) lastprivate(__pyx_v_x_fac) lastprivate(__pyx_v_y_top_ind) lastprivate(__pyx_v_z_bot_ind) lastprivate(__pyx_v_xi) lastprivate(__pyx_v_zi) lastprivate(__pyx_v_mid_ind) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_v110) lastprivate(__pyx_v_v101) lastprivate(__pyx_v_y_bot_ind) lastprivate(__pyx_v_y_fac) lastprivate(__pyx_v_z_fac) lastprivate(__pyx_v_v0) lastprivate(__pyx_v_v11)
#endif /* _OPENMP */
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
if (__pyx_parallel_why < 2)
{
__pyx_v_i = 0 + 1 * __pyx_t_2;
/* Initialize private variables to invalid values */
__pyx_v_v1 = ((double)__PYX_NAN());
__pyx_v_yi = ((double)__PYX_NAN());
__pyx_v_z_top_ind = ((int)0xbad0bad0);
__pyx_v_v00 = ((double)__PYX_NAN());
__pyx_v_v011 = ((double)__PYX_NAN());
__pyx_v_v01 = ((double)__PYX_NAN());
__pyx_v_x_bot_ind = ((int)0xbad0bad0);
__pyx_v_x_top_ind = ((int)0xbad0bad0);
__pyx_v_v001 = ((double)__PYX_NAN());
__pyx_v_v100 = ((double)__PYX_NAN());
__pyx_v_v000 = ((double)__PYX_NAN());
__pyx_v_v010 = ((double)__PYX_NAN());
__pyx_v_v10 = ((double)__PYX_NAN());
__pyx_v_v111 = ((double)__PYX_NAN());
__pyx_v_x_fac = ((double)__PYX_NAN());
__pyx_v_y_top_ind = ((int)0xbad0bad0);
__pyx_v_z_bot_ind = ((int)0xbad0bad0);
__pyx_v_xi = ((double)__PYX_NAN());
__pyx_v_zi = ((double)__PYX_NAN());
__pyx_v_mid_ind = ((int)0xbad0bad0);
__pyx_v_v110 = ((double)__PYX_NAN());
__pyx_v_v101 = ((double)__PYX_NAN());
__pyx_v_y_bot_ind = ((int)0xbad0bad0);
__pyx_v_y_fac = ((double)__PYX_NAN());
__pyx_v_z_fac = ((double)__PYX_NAN());
__pyx_v_v0 = ((double)__PYX_NAN());
__pyx_v_v11 = ((double)__PYX_NAN());
/* "_interpolate3d.pyx":33
*
* for i in prange(n,nogil=True) :
* if n_x_vals > 0 : # <<<<<<<<<<<<<<
* xi = x[i]
* yi = y[i]
*/
__pyx_t_4 = ((__pyx_v_n_x_vals > 0) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":34
* for i in prange(n,nogil=True) :
* if n_x_vals > 0 :
* xi = x[i] # <<<<<<<<<<<<<<
* yi = y[i]
* zi = z[i]
*/
__pyx_t_5 = __pyx_v_i;
__pyx_v_xi = (*__Pyx_BufPtrStrided1d(float *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_x.diminfo[0].strides));
goto __pyx_L10;
}
__pyx_L10:;
/* "_interpolate3d.pyx":35
* if n_x_vals > 0 :
* xi = x[i]
* yi = y[i] # <<<<<<<<<<<<<<
* zi = z[i]
*
*/
__pyx_t_6 = __pyx_v_i;
__pyx_v_yi = (*__Pyx_BufPtrStrided1d(float *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_y.diminfo[0].strides));
/* "_interpolate3d.pyx":36
* xi = x[i]
* yi = y[i]
* zi = z[i] # <<<<<<<<<<<<<<
*
* if n_x_vals > 0 :
*/
__pyx_t_7 = __pyx_v_i;
__pyx_v_zi = (*__Pyx_BufPtrStrided1d(float *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_z.diminfo[0].strides));
/* "_interpolate3d.pyx":38
* zi = z[i]
*
* if n_x_vals > 0 : # <<<<<<<<<<<<<<
* # find x indices
* x_top_ind = n_x_vals - 1
*/
__pyx_t_4 = ((__pyx_v_n_x_vals > 0) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":40
* if n_x_vals > 0 :
* # find x indices
* x_top_ind = n_x_vals - 1 # <<<<<<<<<<<<<<
* x_bot_ind = 0
*
*/
__pyx_v_x_top_ind = (__pyx_v_n_x_vals - 1);
/* "_interpolate3d.pyx":41
* # find x indices
* x_top_ind = n_x_vals - 1
* x_bot_ind = 0 # <<<<<<<<<<<<<<
*
* while(x_top_ind > x_bot_ind + 1) :
*/
__pyx_v_x_bot_ind = 0;
/* "_interpolate3d.pyx":43
* x_bot_ind = 0
*
* while(x_top_ind > x_bot_ind + 1) : # <<<<<<<<<<<<<<
* mid_ind = floor((x_top_ind-x_bot_ind)/2)+x_bot_ind
* if (xi > x_vals[mid_ind]) :
*/
while (1) {
__pyx_t_4 = ((__pyx_v_x_top_ind > (__pyx_v_x_bot_ind + 1)) != 0);
if (!__pyx_t_4) break;
/* "_interpolate3d.pyx":44
*
* while(x_top_ind > x_bot_ind + 1) :
* mid_ind = floor((x_top_ind-x_bot_ind)/2)+x_bot_ind # <<<<<<<<<<<<<<
* if (xi > x_vals[mid_ind]) :
* x_bot_ind = mid_ind
*/
__pyx_v_mid_ind = (floor(__Pyx_div_long((__pyx_v_x_top_ind - __pyx_v_x_bot_ind), 2)) + __pyx_v_x_bot_ind);
/* "_interpolate3d.pyx":45
* while(x_top_ind > x_bot_ind + 1) :
* mid_ind = floor((x_top_ind-x_bot_ind)/2)+x_bot_ind
* if (xi > x_vals[mid_ind]) : # <<<<<<<<<<<<<<
* x_bot_ind = mid_ind
* else :
*/
__pyx_t_8 = __pyx_v_mid_ind;
__pyx_t_4 = ((__pyx_v_xi > (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.buf, __pyx_t_8, __pyx_pybuffernd_x_vals.diminfo[0].strides))) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":46
* mid_ind = floor((x_top_ind-x_bot_ind)/2)+x_bot_ind
* if (xi > x_vals[mid_ind]) :
* x_bot_ind = mid_ind # <<<<<<<<<<<<<<
* else :
* x_top_ind = mid_ind
*/
__pyx_v_x_bot_ind = __pyx_v_mid_ind;
goto __pyx_L14;
}
/*else*/ {
/* "_interpolate3d.pyx":48
* x_bot_ind = mid_ind
* else :
* x_top_ind = mid_ind # <<<<<<<<<<<<<<
*
* else :
*/
__pyx_v_x_top_ind = __pyx_v_mid_ind;
}
__pyx_L14:;
}
goto __pyx_L11;
}
/*else*/ {
/* "_interpolate3d.pyx":51
*
* else :
* x_top_ind = 0 # <<<<<<<<<<<<<<
* x_bot_ind = 0
*
*/
__pyx_v_x_top_ind = 0;
/* "_interpolate3d.pyx":52
* else :
* x_top_ind = 0
* x_bot_ind = 0 # <<<<<<<<<<<<<<
*
* # find y indices
*/
__pyx_v_x_bot_ind = 0;
}
__pyx_L11:;
/* "_interpolate3d.pyx":55
*
* # find y indices
* y_top_ind = n_y_vals - 1 # <<<<<<<<<<<<<<
* y_bot_ind = 0
*
*/
__pyx_v_y_top_ind = (__pyx_v_n_y_vals - 1);
/* "_interpolate3d.pyx":56
* # find y indices
* y_top_ind = n_y_vals - 1
* y_bot_ind = 0 # <<<<<<<<<<<<<<
*
* while(y_top_ind > y_bot_ind + 1) :
*/
__pyx_v_y_bot_ind = 0;
/* "_interpolate3d.pyx":58
* y_bot_ind = 0
*
* while(y_top_ind > y_bot_ind + 1) : # <<<<<<<<<<<<<<
* mid_ind = floor((y_top_ind-y_bot_ind)/2)+y_bot_ind
* if (yi > y_vals[mid_ind]) :
*/
while (1) {
__pyx_t_4 = ((__pyx_v_y_top_ind > (__pyx_v_y_bot_ind + 1)) != 0);
if (!__pyx_t_4) break;
/* "_interpolate3d.pyx":59
*
* while(y_top_ind > y_bot_ind + 1) :
* mid_ind = floor((y_top_ind-y_bot_ind)/2)+y_bot_ind # <<<<<<<<<<<<<<
* if (yi > y_vals[mid_ind]) :
* y_bot_ind = mid_ind
*/
__pyx_v_mid_ind = (floor(__Pyx_div_long((__pyx_v_y_top_ind - __pyx_v_y_bot_ind), 2)) + __pyx_v_y_bot_ind);
/* "_interpolate3d.pyx":60
* while(y_top_ind > y_bot_ind + 1) :
* mid_ind = floor((y_top_ind-y_bot_ind)/2)+y_bot_ind
* if (yi > y_vals[mid_ind]) : # <<<<<<<<<<<<<<
* y_bot_ind = mid_ind
* else :
*/
__pyx_t_9 = __pyx_v_mid_ind;
__pyx_t_4 = ((__pyx_v_yi > (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_y_vals.diminfo[0].strides))) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":61
* mid_ind = floor((y_top_ind-y_bot_ind)/2)+y_bot_ind
* if (yi > y_vals[mid_ind]) :
* y_bot_ind = mid_ind # <<<<<<<<<<<<<<
* else :
* y_top_ind = mid_ind
*/
__pyx_v_y_bot_ind = __pyx_v_mid_ind;
goto __pyx_L17;
}
/*else*/ {
/* "_interpolate3d.pyx":63
* y_bot_ind = mid_ind
* else :
* y_top_ind = mid_ind # <<<<<<<<<<<<<<
*
* # find z indices
*/
__pyx_v_y_top_ind = __pyx_v_mid_ind;
}
__pyx_L17:;
}
/* "_interpolate3d.pyx":66
*
* # find z indices
* z_top_ind = n_z_vals - 1 # <<<<<<<<<<<<<<
* z_bot_ind = 0
*
*/
__pyx_v_z_top_ind = (__pyx_v_n_z_vals - 1);
/* "_interpolate3d.pyx":67
* # find z indices
* z_top_ind = n_z_vals - 1
* z_bot_ind = 0 # <<<<<<<<<<<<<<
*
* while(z_top_ind > z_bot_ind + 1) :
*/
__pyx_v_z_bot_ind = 0;
/* "_interpolate3d.pyx":69
* z_bot_ind = 0
*
* while(z_top_ind > z_bot_ind + 1) : # <<<<<<<<<<<<<<
* mid_ind = floor((z_top_ind-z_bot_ind)/2)+z_bot_ind
* if (zi > z_vals[mid_ind]) :
*/
while (1) {
__pyx_t_4 = ((__pyx_v_z_top_ind > (__pyx_v_z_bot_ind + 1)) != 0);
if (!__pyx_t_4) break;
/* "_interpolate3d.pyx":70
*
* while(z_top_ind > z_bot_ind + 1) :
* mid_ind = floor((z_top_ind-z_bot_ind)/2)+z_bot_ind # <<<<<<<<<<<<<<
* if (zi > z_vals[mid_ind]) :
* z_bot_ind = mid_ind
*/
__pyx_v_mid_ind = (floor(__Pyx_div_long((__pyx_v_z_top_ind - __pyx_v_z_bot_ind), 2)) + __pyx_v_z_bot_ind);
/* "_interpolate3d.pyx":71
* while(z_top_ind > z_bot_ind + 1) :
* mid_ind = floor((z_top_ind-z_bot_ind)/2)+z_bot_ind
* if (zi > z_vals[mid_ind]) : # <<<<<<<<<<<<<<
* z_bot_ind = mid_ind
* else :
*/
__pyx_t_10 = __pyx_v_mid_ind;
__pyx_t_4 = ((__pyx_v_zi > (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_z_vals.diminfo[0].strides))) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":72
* mid_ind = floor((z_top_ind-z_bot_ind)/2)+z_bot_ind
* if (zi > z_vals[mid_ind]) :
* z_bot_ind = mid_ind # <<<<<<<<<<<<<<
* else :
* z_top_ind = mid_ind
*/
__pyx_v_z_bot_ind = __pyx_v_mid_ind;
goto __pyx_L20;
}
/*else*/ {
/* "_interpolate3d.pyx":74
* z_bot_ind = mid_ind
* else :
* z_top_ind = mid_ind # <<<<<<<<<<<<<<
*
* if n_x_vals > 0 :
*/
__pyx_v_z_top_ind = __pyx_v_mid_ind;
}
__pyx_L20:;
}
/* "_interpolate3d.pyx":76
* z_top_ind = mid_ind
*
* if n_x_vals > 0 : # <<<<<<<<<<<<<<
* x_fac = (xi - x_vals[x_bot_ind])/(x_vals[x_top_ind] - x_vals[x_bot_ind])
*
*/
__pyx_t_4 = ((__pyx_v_n_x_vals > 0) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":77
*
* if n_x_vals > 0 :
* x_fac = (xi - x_vals[x_bot_ind])/(x_vals[x_top_ind] - x_vals[x_bot_ind]) # <<<<<<<<<<<<<<
*
* y_fac = (yi - y_vals[y_bot_ind])/(y_vals[y_top_ind] - y_vals[y_bot_ind])
*/
__pyx_t_11 = __pyx_v_x_bot_ind;
__pyx_t_12 = (__pyx_v_xi - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_x_vals.diminfo[0].strides)));
__pyx_t_13 = __pyx_v_x_top_ind;
__pyx_t_14 = __pyx_v_x_bot_ind;
__pyx_t_15 = ((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_x_vals.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x_vals.diminfo[0].strides)));
if (unlikely(__pyx_t_15 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L8_error;}
}
__pyx_v_x_fac = (__pyx_t_12 / __pyx_t_15);
goto __pyx_L21;
}
__pyx_L21:;
/* "_interpolate3d.pyx":79
* x_fac = (xi - x_vals[x_bot_ind])/(x_vals[x_top_ind] - x_vals[x_bot_ind])
*
* y_fac = (yi - y_vals[y_bot_ind])/(y_vals[y_top_ind] - y_vals[y_bot_ind]) # <<<<<<<<<<<<<<
* z_fac = (zi - z_vals[z_bot_ind])/(z_vals[z_top_ind] - z_vals[z_bot_ind])
*
*/
__pyx_t_16 = __pyx_v_y_bot_ind;
__pyx_t_15 = (__pyx_v_yi - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y_vals.diminfo[0].strides)));
__pyx_t_17 = __pyx_v_y_top_ind;
__pyx_t_18 = __pyx_v_y_bot_ind;
__pyx_t_12 = ((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_y_vals.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y_vals.diminfo[0].strides)));
if (unlikely(__pyx_t_12 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L8_error;}
}
__pyx_v_y_fac = (__pyx_t_15 / __pyx_t_12);
/* "_interpolate3d.pyx":80
*
* y_fac = (yi - y_vals[y_bot_ind])/(y_vals[y_top_ind] - y_vals[y_bot_ind])
* z_fac = (zi - z_vals[z_bot_ind])/(z_vals[z_top_ind] - z_vals[z_bot_ind]) # <<<<<<<<<<<<<<
*
* # vertex values
*/
__pyx_t_19 = __pyx_v_z_bot_ind;
__pyx_t_12 = (__pyx_v_zi - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_z_vals.diminfo[0].strides)));
__pyx_t_20 = __pyx_v_z_top_ind;
__pyx_t_21 = __pyx_v_z_bot_ind;
__pyx_t_15 = ((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.buf, __pyx_t_20, __pyx_pybuffernd_z_vals.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_z_vals.diminfo[0].strides)));
if (unlikely(__pyx_t_15 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L8_error;}
}
__pyx_v_z_fac = (__pyx_t_12 / __pyx_t_15);
/* "_interpolate3d.pyx":83
*
* # vertex values
* if n_x_vals > 0 : # <<<<<<<<<<<<<<
* v000 = vals[x_bot_ind,y_bot_ind,z_bot_ind]
* v001 = vals[x_bot_ind,y_bot_ind,z_top_ind]
*/
__pyx_t_4 = ((__pyx_v_n_x_vals > 0) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":84
* # vertex values
* if n_x_vals > 0 :
* v000 = vals[x_bot_ind,y_bot_ind,z_bot_ind] # <<<<<<<<<<<<<<
* v001 = vals[x_bot_ind,y_bot_ind,z_top_ind]
* v010 = vals[x_bot_ind,y_top_ind,z_bot_ind]
*/
__pyx_t_22 = __pyx_v_x_bot_ind;
__pyx_t_23 = __pyx_v_y_bot_ind;
__pyx_t_24 = __pyx_v_z_bot_ind;
__pyx_v_v000 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_23, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_24, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":85
* if n_x_vals > 0 :
* v000 = vals[x_bot_ind,y_bot_ind,z_bot_ind]
* v001 = vals[x_bot_ind,y_bot_ind,z_top_ind] # <<<<<<<<<<<<<<
* v010 = vals[x_bot_ind,y_top_ind,z_bot_ind]
* v011 = vals[x_bot_ind,y_top_ind,z_top_ind]
*/
__pyx_t_25 = __pyx_v_x_bot_ind;
__pyx_t_26 = __pyx_v_y_bot_ind;
__pyx_t_27 = __pyx_v_z_top_ind;
__pyx_v_v001 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_25, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_26, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_27, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":86
* v000 = vals[x_bot_ind,y_bot_ind,z_bot_ind]
* v001 = vals[x_bot_ind,y_bot_ind,z_top_ind]
* v010 = vals[x_bot_ind,y_top_ind,z_bot_ind] # <<<<<<<<<<<<<<
* v011 = vals[x_bot_ind,y_top_ind,z_top_ind]
* v100 = vals[x_top_ind,y_bot_ind,z_bot_ind]
*/
__pyx_t_28 = __pyx_v_x_bot_ind;
__pyx_t_29 = __pyx_v_y_top_ind;
__pyx_t_30 = __pyx_v_z_bot_ind;
__pyx_v_v010 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_28, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_29, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_30, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":87
* v001 = vals[x_bot_ind,y_bot_ind,z_top_ind]
* v010 = vals[x_bot_ind,y_top_ind,z_bot_ind]
* v011 = vals[x_bot_ind,y_top_ind,z_top_ind] # <<<<<<<<<<<<<<
* v100 = vals[x_top_ind,y_bot_ind,z_bot_ind]
* v101 = vals[x_top_ind,y_bot_ind,z_top_ind]
*/
__pyx_t_31 = __pyx_v_x_bot_ind;
__pyx_t_32 = __pyx_v_y_top_ind;
__pyx_t_33 = __pyx_v_z_top_ind;
__pyx_v_v011 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_31, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_32, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_33, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":88
* v010 = vals[x_bot_ind,y_top_ind,z_bot_ind]
* v011 = vals[x_bot_ind,y_top_ind,z_top_ind]
* v100 = vals[x_top_ind,y_bot_ind,z_bot_ind] # <<<<<<<<<<<<<<
* v101 = vals[x_top_ind,y_bot_ind,z_top_ind]
* v110 = vals[x_top_ind,y_top_ind,z_bot_ind]
*/
__pyx_t_34 = __pyx_v_x_top_ind;
__pyx_t_35 = __pyx_v_y_bot_ind;
__pyx_t_36 = __pyx_v_z_bot_ind;
__pyx_v_v100 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_34, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_35, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_36, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":89
* v011 = vals[x_bot_ind,y_top_ind,z_top_ind]
* v100 = vals[x_top_ind,y_bot_ind,z_bot_ind]
* v101 = vals[x_top_ind,y_bot_ind,z_top_ind] # <<<<<<<<<<<<<<
* v110 = vals[x_top_ind,y_top_ind,z_bot_ind]
* v111 = vals[x_top_ind,y_top_ind,z_top_ind]
*/
__pyx_t_37 = __pyx_v_x_top_ind;
__pyx_t_38 = __pyx_v_y_bot_ind;
__pyx_t_39 = __pyx_v_z_top_ind;
__pyx_v_v101 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_37, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_38, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_39, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":90
* v100 = vals[x_top_ind,y_bot_ind,z_bot_ind]
* v101 = vals[x_top_ind,y_bot_ind,z_top_ind]
* v110 = vals[x_top_ind,y_top_ind,z_bot_ind] # <<<<<<<<<<<<<<
* v111 = vals[x_top_ind,y_top_ind,z_top_ind]
*
*/
__pyx_t_40 = __pyx_v_x_top_ind;
__pyx_t_41 = __pyx_v_y_top_ind;
__pyx_t_42 = __pyx_v_z_bot_ind;
__pyx_v_v110 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_40, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_41, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_42, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":91
* v101 = vals[x_top_ind,y_bot_ind,z_top_ind]
* v110 = vals[x_top_ind,y_top_ind,z_bot_ind]
* v111 = vals[x_top_ind,y_top_ind,z_top_ind] # <<<<<<<<<<<<<<
*
* v00 = v000*(1.0-x_fac) + v100*x_fac
*/
__pyx_t_43 = __pyx_v_x_top_ind;
__pyx_t_44 = __pyx_v_y_top_ind;
__pyx_t_45 = __pyx_v_z_top_ind;
__pyx_v_v111 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_43, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_44, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_45, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":93
* v111 = vals[x_top_ind,y_top_ind,z_top_ind]
*
* v00 = v000*(1.0-x_fac) + v100*x_fac # <<<<<<<<<<<<<<
* v10 = v010*(1.0-x_fac) + v110*x_fac
* v01 = v001*(1.0-x_fac) + v101*x_fac
*/
__pyx_v_v00 = ((__pyx_v_v000 * (1.0 - __pyx_v_x_fac)) + (__pyx_v_v100 * __pyx_v_x_fac));
/* "_interpolate3d.pyx":94
*
* v00 = v000*(1.0-x_fac) + v100*x_fac
* v10 = v010*(1.0-x_fac) + v110*x_fac # <<<<<<<<<<<<<<
* v01 = v001*(1.0-x_fac) + v101*x_fac
* v11 = v011*(1.0-x_fac) + v111*x_fac
*/
__pyx_v_v10 = ((__pyx_v_v010 * (1.0 - __pyx_v_x_fac)) + (__pyx_v_v110 * __pyx_v_x_fac));
/* "_interpolate3d.pyx":95
* v00 = v000*(1.0-x_fac) + v100*x_fac
* v10 = v010*(1.0-x_fac) + v110*x_fac
* v01 = v001*(1.0-x_fac) + v101*x_fac # <<<<<<<<<<<<<<
* v11 = v011*(1.0-x_fac) + v111*x_fac
*
*/
__pyx_v_v01 = ((__pyx_v_v001 * (1.0 - __pyx_v_x_fac)) + (__pyx_v_v101 * __pyx_v_x_fac));
/* "_interpolate3d.pyx":96
* v10 = v010*(1.0-x_fac) + v110*x_fac
* v01 = v001*(1.0-x_fac) + v101*x_fac
* v11 = v011*(1.0-x_fac) + v111*x_fac # <<<<<<<<<<<<<<
*
* else :
*/
__pyx_v_v11 = ((__pyx_v_v011 * (1.0 - __pyx_v_x_fac)) + (__pyx_v_v111 * __pyx_v_x_fac));
goto __pyx_L22;
}
/*else*/ {
/* "_interpolate3d.pyx":99
*
* else :
* v00 = vals[0, y_bot_ind, z_bot_ind] # <<<<<<<<<<<<<<
* v01 = vals[0, y_bot_ind, z_top_ind]
* v10 = vals[0, y_top_ind, z_bot_ind]
*/
__pyx_t_46 = 0;
__pyx_t_47 = __pyx_v_y_bot_ind;
__pyx_t_48 = __pyx_v_z_bot_ind;
__pyx_v_v00 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_46, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_47, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_48, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":100
* else :
* v00 = vals[0, y_bot_ind, z_bot_ind]
* v01 = vals[0, y_bot_ind, z_top_ind] # <<<<<<<<<<<<<<
* v10 = vals[0, y_top_ind, z_bot_ind]
* v11 = vals[0, y_top_ind, z_top_ind]
*/
__pyx_t_49 = 0;
__pyx_t_50 = __pyx_v_y_bot_ind;
__pyx_t_51 = __pyx_v_z_top_ind;
__pyx_v_v01 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_49, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_50, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_51, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":101
* v00 = vals[0, y_bot_ind, z_bot_ind]
* v01 = vals[0, y_bot_ind, z_top_ind]
* v10 = vals[0, y_top_ind, z_bot_ind] # <<<<<<<<<<<<<<
* v11 = vals[0, y_top_ind, z_top_ind]
*
*/
__pyx_t_52 = 0;
__pyx_t_53 = __pyx_v_y_top_ind;
__pyx_t_54 = __pyx_v_z_bot_ind;
__pyx_v_v10 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_52, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_53, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_54, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":102
* v01 = vals[0, y_bot_ind, z_top_ind]
* v10 = vals[0, y_top_ind, z_bot_ind]
* v11 = vals[0, y_top_ind, z_top_ind] # <<<<<<<<<<<<<<
*
* v0 = v00*(1.0-y_fac) + v10*y_fac
*/
__pyx_t_55 = 0;
__pyx_t_56 = __pyx_v_y_top_ind;
__pyx_t_57 = __pyx_v_z_top_ind;
__pyx_v_v11 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_55, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_56, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_57, __pyx_pybuffernd_vals.diminfo[2].strides));
}
__pyx_L22:;
/* "_interpolate3d.pyx":104
* v11 = vals[0, y_top_ind, z_top_ind]
*
* v0 = v00*(1.0-y_fac) + v10*y_fac # <<<<<<<<<<<<<<
* v1 = v01*(1.0-y_fac) + v11*y_fac
*
*/
__pyx_v_v0 = ((__pyx_v_v00 * (1.0 - __pyx_v_y_fac)) + (__pyx_v_v10 * __pyx_v_y_fac));
/* "_interpolate3d.pyx":105
*
* v0 = v00*(1.0-y_fac) + v10*y_fac
* v1 = v01*(1.0-y_fac) + v11*y_fac # <<<<<<<<<<<<<<
*
* result_array[i] = v0*(1-z_fac) + v1*z_fac
*/
__pyx_v_v1 = ((__pyx_v_v01 * (1.0 - __pyx_v_y_fac)) + (__pyx_v_v11 * __pyx_v_y_fac));
/* "_interpolate3d.pyx":107
* v1 = v01*(1.0-y_fac) + v11*y_fac
*
* result_array[i] = v0*(1-z_fac) + v1*z_fac # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_58 = __pyx_v_i;
*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_result_array.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_result_array.diminfo[0].strides) = ((__pyx_v_v0 * (1.0 - __pyx_v_z_fac)) + (__pyx_v_v1 * __pyx_v_z_fac));
goto __pyx_L24;
__pyx_L8_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L23;
__pyx_L23:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_v1;
__pyx_parallel_temp1 = __pyx_v_yi;
__pyx_parallel_temp2 = __pyx_v_z_top_ind;
__pyx_parallel_temp3 = __pyx_v_v00;
__pyx_parallel_temp4 = __pyx_v_v011;
__pyx_parallel_temp5 = __pyx_v_v01;
__pyx_parallel_temp6 = __pyx_v_x_bot_ind;
__pyx_parallel_temp7 = __pyx_v_x_top_ind;
__pyx_parallel_temp8 = __pyx_v_v001;
__pyx_parallel_temp9 = __pyx_v_v100;
__pyx_parallel_temp10 = __pyx_v_v000;
__pyx_parallel_temp11 = __pyx_v_v010;
__pyx_parallel_temp12 = __pyx_v_v10;
__pyx_parallel_temp13 = __pyx_v_v111;
__pyx_parallel_temp14 = __pyx_v_x_fac;
__pyx_parallel_temp15 = __pyx_v_y_top_ind;
__pyx_parallel_temp16 = __pyx_v_z_bot_ind;
__pyx_parallel_temp17 = __pyx_v_xi;
__pyx_parallel_temp18 = __pyx_v_zi;
__pyx_parallel_temp19 = __pyx_v_mid_ind;
__pyx_parallel_temp20 = __pyx_v_i;
__pyx_parallel_temp21 = __pyx_v_v110;
__pyx_parallel_temp22 = __pyx_v_v101;
__pyx_parallel_temp23 = __pyx_v_y_bot_ind;
__pyx_parallel_temp24 = __pyx_v_y_fac;
__pyx_parallel_temp25 = __pyx_v_z_fac;
__pyx_parallel_temp26 = __pyx_v_v0;
__pyx_parallel_temp27 = __pyx_v_v11;
}
__pyx_L24:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_v1 = __pyx_parallel_temp0;
__pyx_v_yi = __pyx_parallel_temp1;
__pyx_v_z_top_ind = __pyx_parallel_temp2;
__pyx_v_v00 = __pyx_parallel_temp3;
__pyx_v_v011 = __pyx_parallel_temp4;
__pyx_v_v01 = __pyx_parallel_temp5;
__pyx_v_x_bot_ind = __pyx_parallel_temp6;
__pyx_v_x_top_ind = __pyx_parallel_temp7;
__pyx_v_v001 = __pyx_parallel_temp8;
__pyx_v_v100 = __pyx_parallel_temp9;
__pyx_v_v000 = __pyx_parallel_temp10;
__pyx_v_v010 = __pyx_parallel_temp11;
__pyx_v_v10 = __pyx_parallel_temp12;
__pyx_v_v111 = __pyx_parallel_temp13;
__pyx_v_x_fac = __pyx_parallel_temp14;
__pyx_v_y_top_ind = __pyx_parallel_temp15;
__pyx_v_z_bot_ind = __pyx_parallel_temp16;
__pyx_v_xi = __pyx_parallel_temp17;
__pyx_v_zi = __pyx_parallel_temp18;
__pyx_v_mid_ind = __pyx_parallel_temp19;
__pyx_v_i = __pyx_parallel_temp20;
__pyx_v_v110 = __pyx_parallel_temp21;
__pyx_v_v101 = __pyx_parallel_temp22;
__pyx_v_y_bot_ind = __pyx_parallel_temp23;
__pyx_v_y_fac = __pyx_parallel_temp24;
__pyx_v_z_fac = __pyx_parallel_temp25;
__pyx_v_v0 = __pyx_parallel_temp26;
__pyx_v_v11 = __pyx_parallel_temp27;
switch (__pyx_parallel_why) {
case 3: goto __pyx_L3_return;
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L4_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "_interpolate3d.pyx":32
* cdef Py_ssize_t i
*
* for i in prange(n,nogil=True) : # <<<<<<<<<<<<<<
* if n_x_vals > 0 :
* xi = x[i]
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L3_return: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L0;
}
__pyx_L4_error: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L5:;
}
}
/* "_interpolate3d.pyx":14
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def interpolate3d(int n, # <<<<<<<<<<<<<<
* np.ndarray[floating,ndim=1] x,
* np.ndarray[floating,ndim=1] y,
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result_array.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z_vals.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("_interpolate3d.interpolate3d", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result_array.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z_vals.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * Cython-generated Python-level entry point for the float64 ("fuse_1")
 * specialization of interpolate3d(n, x, y, z, n_x_vals, x_vals, n_y_vals,
 * y_vals, n_z_vals, z_vals, vals, result_array).
 *
 * Responsibilities (all standard Cython wrapper boilerplate):
 *   - unpack 12 positional and/or keyword arguments from __pyx_args/__pyx_kwds,
 *   - convert the int arguments (n, n_*_vals) from Python objects,
 *   - type-check every ndarray argument against numpy.ndarray,
 *   - dispatch to the C-level implementation __pyx_pf_14_interpolate3d_4interpolate3d.
 *
 * NOTE(review): this code is generated by Cython; do not edit by hand —
 * regenerate from _interpolate3d.pyx instead. Statement order, the
 * fallthrough switches and the goto-label error ladder below are all
 * load-bearing.
 */
static PyObject *__pyx_fuse_1__pyx_pw_14_interpolate3d_5interpolate3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* Method table entry used by Cython's fused-function dispatcher to select
 * this specialization; flags allow both positional and keyword calls. */
static PyMethodDef __pyx_fuse_1__pyx_mdef_14_interpolate3d_5interpolate3d = {"__pyx_fuse_1interpolate3d", (PyCFunction)__pyx_fuse_1__pyx_pw_14_interpolate3d_5interpolate3d, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_fuse_1__pyx_pw_14_interpolate3d_5interpolate3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  /* C-level holders for the 12 unpacked arguments (borrowed references). */
  CYTHON_UNUSED int __pyx_v_n;
  PyArrayObject *__pyx_v_x = 0;
  PyArrayObject *__pyx_v_y = 0;
  PyArrayObject *__pyx_v_z = 0;
  int __pyx_v_n_x_vals;
  PyArrayObject *__pyx_v_x_vals = 0;
  int __pyx_v_n_y_vals;
  PyArrayObject *__pyx_v_y_vals = 0;
  int __pyx_v_n_z_vals;
  PyArrayObject *__pyx_v_z_vals = 0;
  PyArrayObject *__pyx_v_vals = 0;
  PyArrayObject *__pyx_v_result_array = 0;
  /* Error-location bookkeeping used by __Pyx_AddTraceback on failure. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("interpolate3d (wrapper)", 0);
  {
    /* Keyword names in declaration order; terminated by 0 for the parser. */
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_n,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z,&__pyx_n_s_n_x_vals,&__pyx_n_s_x_vals,&__pyx_n_s_n_y_vals,&__pyx_n_s_y_vals,&__pyx_n_s_n_z_vals,&__pyx_n_s_z_vals,&__pyx_n_s_vals,&__pyx_n_s_result_array,0};
    PyObject* values[12] = {0,0,0,0,0,0,0,0,0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      /* Mixed positional + keyword call path. */
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Intentional fallthrough: case K copies positional arg K-1 and falls
       * through to copy all lower-indexed positionals as well. */
      switch (pos_args) {
        case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
        case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
        case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
        case  9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
        case  8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Intentional fallthrough again: starting at the first argument NOT
       * supplied positionally, look each remaining one up by keyword; a
       * missing required argument raises "takes exactly 12 arguments". */
      switch (pos_args) {
        case  0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        case  1:
        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  2:
        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  3:
        if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  4:
        if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n_x_vals)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  5:
        if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x_vals)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  6:
        if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n_y_vals)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  7:
        if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y_vals)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  8:
        if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n_z_vals)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  9:
        if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z_vals)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case 10:
        if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_vals)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case 11:
        if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_result_array)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
      }
      /* Any keyword left unconsumed is unknown or duplicates a positional. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "interpolate3d") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 12) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: purely positional call with exactly 12 arguments. */
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
      values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
      values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
      values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
      values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
      values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
      values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
      values[11] = PyTuple_GET_ITEM(__pyx_args, 11);
    }
    /* Convert int arguments; -1 with a pending exception signals failure.
     * Array arguments are cast now and type-checked after unpacking. */
    __pyx_v_n = __Pyx_PyInt_As_int(values[0]); if (unlikely((__pyx_v_n == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_x = ((PyArrayObject *)values[1]);
    __pyx_v_y = ((PyArrayObject *)values[2]);
    __pyx_v_z = ((PyArrayObject *)values[3]);
    __pyx_v_n_x_vals = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_n_x_vals == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_x_vals = ((PyArrayObject *)values[5]);
    __pyx_v_n_y_vals = __Pyx_PyInt_As_int(values[6]); if (unlikely((__pyx_v_n_y_vals == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_y_vals = ((PyArrayObject *)values[7]);
    __pyx_v_n_z_vals = __Pyx_PyInt_As_int(values[8]); if (unlikely((__pyx_v_n_z_vals == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_z_vals = ((PyArrayObject *)values[9]);
    __pyx_v_vals = ((PyArrayObject *)values[10]);
    __pyx_v_result_array = ((PyArrayObject *)values[11]);
  }
  goto __pyx_L4_argument_unpacking_done;
  /* Wrong number of positional arguments. */
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("interpolate3d", 1, 12, 12, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  /* Unpacking/conversion failed: record traceback and bail out. */
  __pyx_L3_error:;
  __Pyx_AddTraceback("_interpolate3d.interpolate3d", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Verify each array argument is a numpy.ndarray (the declared buffer
   * type in the .pyx signature) before handing it to the implementation. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 1, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_z), __pyx_ptype_5numpy_ndarray, 1, "z", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x_vals), __pyx_ptype_5numpy_ndarray, 1, "x_vals", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y_vals), __pyx_ptype_5numpy_ndarray, 1, "y_vals", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_z_vals), __pyx_ptype_5numpy_ndarray, 1, "z_vals", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_vals), __pyx_ptype_5numpy_ndarray, 1, "vals", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_result_array), __pyx_ptype_5numpy_ndarray, 1, "result_array", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  /* All arguments validated: dispatch to the C implementation. */
  __pyx_r = __pyx_pf_14_interpolate3d_4interpolate3d(__pyx_self, __pyx_v_n, __pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_n_x_vals, __pyx_v_x_vals, __pyx_v_n_y_vals, __pyx_v_y_vals, __pyx_v_n_z_vals, __pyx_v_z_vals, __pyx_v_vals, __pyx_v_result_array);
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_14_interpolate3d_4interpolate3d(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED int __pyx_v_n, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, PyArrayObject *__pyx_v_z, int __pyx_v_n_x_vals, PyArrayObject *__pyx_v_x_vals, int __pyx_v_n_y_vals, PyArrayObject *__pyx_v_y_vals, int __pyx_v_n_z_vals, PyArrayObject *__pyx_v_z_vals, PyArrayObject *__pyx_v_vals, PyArrayObject *__pyx_v_result_array) {
int __pyx_v_x_top_ind;
int __pyx_v_x_bot_ind;
int __pyx_v_y_top_ind;
int __pyx_v_y_bot_ind;
int __pyx_v_z_top_ind;
int __pyx_v_z_bot_ind;
int __pyx_v_mid_ind;
double __pyx_v_x_fac;
double __pyx_v_y_fac;
double __pyx_v_z_fac;
double __pyx_v_v0;
double __pyx_v_v1;
double __pyx_v_v00;
double __pyx_v_v01;
double __pyx_v_v10;
double __pyx_v_v11;
double __pyx_v_v000;
double __pyx_v_v001;
double __pyx_v_v010;
double __pyx_v_v011;
double __pyx_v_v100;
double __pyx_v_v101;
double __pyx_v_v110;
double __pyx_v_v111;
double __pyx_v_xi;
double __pyx_v_yi;
double __pyx_v_zi;
Py_ssize_t __pyx_v_i;
__Pyx_LocalBuf_ND __pyx_pybuffernd_result_array;
__Pyx_Buffer __pyx_pybuffer_result_array;
__Pyx_LocalBuf_ND __pyx_pybuffernd_vals;
__Pyx_Buffer __pyx_pybuffer_vals;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x;
__Pyx_Buffer __pyx_pybuffer_x;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x_vals;
__Pyx_Buffer __pyx_pybuffer_x_vals;
__Pyx_LocalBuf_ND __pyx_pybuffernd_y;
__Pyx_Buffer __pyx_pybuffer_y;
__Pyx_LocalBuf_ND __pyx_pybuffernd_y_vals;
__Pyx_Buffer __pyx_pybuffer_y_vals;
__Pyx_LocalBuf_ND __pyx_pybuffernd_z;
__Pyx_Buffer __pyx_pybuffer_z;
__Pyx_LocalBuf_ND __pyx_pybuffernd_z_vals;
__Pyx_Buffer __pyx_pybuffer_z_vals;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
__pyx_t_5numpy_float64_t __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
__pyx_t_5numpy_float64_t __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
int __pyx_t_26;
int __pyx_t_27;
int __pyx_t_28;
int __pyx_t_29;
int __pyx_t_30;
int __pyx_t_31;
int __pyx_t_32;
int __pyx_t_33;
int __pyx_t_34;
int __pyx_t_35;
int __pyx_t_36;
int __pyx_t_37;
int __pyx_t_38;
int __pyx_t_39;
int __pyx_t_40;
int __pyx_t_41;
int __pyx_t_42;
int __pyx_t_43;
int __pyx_t_44;
int __pyx_t_45;
long __pyx_t_46;
int __pyx_t_47;
int __pyx_t_48;
long __pyx_t_49;
int __pyx_t_50;
int __pyx_t_51;
long __pyx_t_52;
int __pyx_t_53;
int __pyx_t_54;
long __pyx_t_55;
int __pyx_t_56;
int __pyx_t_57;
Py_ssize_t __pyx_t_58;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_fuse_1interpolate3d", 0);
__pyx_pybuffer_x.pybuffer.buf = NULL;
__pyx_pybuffer_x.refcount = 0;
__pyx_pybuffernd_x.data = NULL;
__pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
__pyx_pybuffer_y.pybuffer.buf = NULL;
__pyx_pybuffer_y.refcount = 0;
__pyx_pybuffernd_y.data = NULL;
__pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y;
__pyx_pybuffer_z.pybuffer.buf = NULL;
__pyx_pybuffer_z.refcount = 0;
__pyx_pybuffernd_z.data = NULL;
__pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z;
__pyx_pybuffer_x_vals.pybuffer.buf = NULL;
__pyx_pybuffer_x_vals.refcount = 0;
__pyx_pybuffernd_x_vals.data = NULL;
__pyx_pybuffernd_x_vals.rcbuffer = &__pyx_pybuffer_x_vals;
__pyx_pybuffer_y_vals.pybuffer.buf = NULL;
__pyx_pybuffer_y_vals.refcount = 0;
__pyx_pybuffernd_y_vals.data = NULL;
__pyx_pybuffernd_y_vals.rcbuffer = &__pyx_pybuffer_y_vals;
__pyx_pybuffer_z_vals.pybuffer.buf = NULL;
__pyx_pybuffer_z_vals.refcount = 0;
__pyx_pybuffernd_z_vals.data = NULL;
__pyx_pybuffernd_z_vals.rcbuffer = &__pyx_pybuffer_z_vals;
__pyx_pybuffer_vals.pybuffer.buf = NULL;
__pyx_pybuffer_vals.refcount = 0;
__pyx_pybuffernd_vals.data = NULL;
__pyx_pybuffernd_vals.rcbuffer = &__pyx_pybuffer_vals;
__pyx_pybuffer_result_array.pybuffer.buf = NULL;
__pyx_pybuffer_result_array.refcount = 0;
__pyx_pybuffernd_result_array.data = NULL;
__pyx_pybuffernd_result_array.rcbuffer = &__pyx_pybuffer_result_array;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x_vals.rcbuffer->pybuffer, (PyObject*)__pyx_v_x_vals, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_x_vals.diminfo[0].strides = __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x_vals.diminfo[0].shape = __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y_vals.rcbuffer->pybuffer, (PyObject*)__pyx_v_y_vals, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_y_vals.diminfo[0].strides = __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y_vals.diminfo[0].shape = __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z_vals.rcbuffer->pybuffer, (PyObject*)__pyx_v_z_vals, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_z_vals.diminfo[0].strides = __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z_vals.diminfo[0].shape = __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_vals.rcbuffer->pybuffer, (PyObject*)__pyx_v_vals, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_vals.diminfo[0].strides = __pyx_pybuffernd_vals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_vals.diminfo[0].shape = __pyx_pybuffernd_vals.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_vals.diminfo[1].strides = __pyx_pybuffernd_vals.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_vals.diminfo[1].shape = __pyx_pybuffernd_vals.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_vals.diminfo[2].strides = __pyx_pybuffernd_vals.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_vals.diminfo[2].shape = __pyx_pybuffernd_vals.rcbuffer->pybuffer.shape[2];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_result_array.rcbuffer->pybuffer, (PyObject*)__pyx_v_result_array, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_pybuffernd_result_array.diminfo[0].strides = __pyx_pybuffernd_result_array.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_result_array.diminfo[0].shape = __pyx_pybuffernd_result_array.rcbuffer->pybuffer.shape[0];
/* "_interpolate3d.pyx":32
* cdef Py_ssize_t i
*
* for i in prange(n,nogil=True) : # <<<<<<<<<<<<<<
* if n_x_vals > 0 :
* xi = x[i]
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
__pyx_t_1 = __pyx_v_n;
if (1 == 0) abort();
{
double __pyx_parallel_temp0 = __PYX_NAN();
double __pyx_parallel_temp1 = __PYX_NAN();
double __pyx_parallel_temp2 = __PYX_NAN();
double __pyx_parallel_temp3 = __PYX_NAN();
int __pyx_parallel_temp4 = 0xbad0bad0;
double __pyx_parallel_temp5 = __PYX_NAN();
double __pyx_parallel_temp6 = __PYX_NAN();
int __pyx_parallel_temp7 = 0xbad0bad0;
double __pyx_parallel_temp8 = __PYX_NAN();
int __pyx_parallel_temp9 = 0xbad0bad0;
double __pyx_parallel_temp10 = __PYX_NAN();
double __pyx_parallel_temp11 = __PYX_NAN();
double __pyx_parallel_temp12 = __PYX_NAN();
double __pyx_parallel_temp13 = __PYX_NAN();
int __pyx_parallel_temp14 = 0xbad0bad0;
double __pyx_parallel_temp15 = __PYX_NAN();
int __pyx_parallel_temp16 = 0xbad0bad0;
double __pyx_parallel_temp17 = __PYX_NAN();
double __pyx_parallel_temp18 = __PYX_NAN();
double __pyx_parallel_temp19 = __PYX_NAN();
double __pyx_parallel_temp20 = __PYX_NAN();
int __pyx_parallel_temp21 = 0xbad0bad0;
int __pyx_parallel_temp22 = 0xbad0bad0;
double __pyx_parallel_temp23 = __PYX_NAN();
double __pyx_parallel_temp24 = __PYX_NAN();
Py_ssize_t __pyx_parallel_temp25 = 0xbad0bad0;
double __pyx_parallel_temp26 = __PYX_NAN();
double __pyx_parallel_temp27 = __PYX_NAN();
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_3 = (__pyx_t_1 - 0) / 1;
if (__pyx_t_3 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_23, __pyx_t_28, __pyx_t_38, __pyx_t_20, __pyx_t_27, __pyx_t_29, __pyx_t_44, __pyx_t_45, __pyx_t_32, __pyx_t_7, __pyx_t_21, __pyx_t_24, __pyx_t_8, __pyx_t_14, __pyx_t_19, __pyx_t_41, __pyx_t_35, __pyx_t_50, __pyx_t_13, __pyx_t_18, __pyx_t_42, __pyx_t_31, __pyx_t_57, __pyx_t_17, __pyx_t_46, __pyx_t_49, __pyx_t_53, __pyx_t_4, __pyx_t_34, __pyx_t_12, __pyx_t_37, __pyx_t_30, __pyx_t_52, __pyx_t_25, __pyx_t_11, __pyx_t_16, __pyx_t_36, __pyx_t_33, __pyx_t_56, __pyx_t_22, __pyx_t_10, __pyx_t_15, __pyx_t_39, __pyx_t_43, __pyx_t_54, __pyx_t_47, __pyx_t_51, __pyx_t_5, __pyx_t_6, __pyx_t_48, __pyx_t_9, __pyx_t_26, __pyx_t_40, __pyx_t_58, __pyx_t_55) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_y_fac) lastprivate(__pyx_v_z_fac) lastprivate(__pyx_v_v10) lastprivate(__pyx_v_v010) lastprivate(__pyx_v_z_bot_ind) lastprivate(__pyx_v_yi) lastprivate(__pyx_v_v1) lastprivate(__pyx_v_x_bot_ind) lastprivate(__pyx_v_v0) lastprivate(__pyx_v_x_top_ind) lastprivate(__pyx_v_xi) lastprivate(__pyx_v_v011) lastprivate(__pyx_v_v00) lastprivate(__pyx_v_v100) lastprivate(__pyx_v_z_top_ind) lastprivate(__pyx_v_v110) lastprivate(__pyx_v_y_bot_ind) lastprivate(__pyx_v_v01) lastprivate(__pyx_v_v101) lastprivate(__pyx_v_zi) lastprivate(__pyx_v_v111) lastprivate(__pyx_v_y_top_ind) lastprivate(__pyx_v_mid_ind) lastprivate(__pyx_v_v000) lastprivate(__pyx_v_v001) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_v11) lastprivate(__pyx_v_x_fac)
#endif /* _OPENMP */
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
if (__pyx_parallel_why < 2)
{
__pyx_v_i = 0 + 1 * __pyx_t_2;
/* Initialize private variables to invalid values */
__pyx_v_y_fac = ((double)__PYX_NAN());
__pyx_v_z_fac = ((double)__PYX_NAN());
__pyx_v_v10 = ((double)__PYX_NAN());
__pyx_v_v010 = ((double)__PYX_NAN());
__pyx_v_z_bot_ind = ((int)0xbad0bad0);
__pyx_v_yi = ((double)__PYX_NAN());
__pyx_v_v1 = ((double)__PYX_NAN());
__pyx_v_x_bot_ind = ((int)0xbad0bad0);
__pyx_v_v0 = ((double)__PYX_NAN());
__pyx_v_x_top_ind = ((int)0xbad0bad0);
__pyx_v_xi = ((double)__PYX_NAN());
__pyx_v_v011 = ((double)__PYX_NAN());
__pyx_v_v00 = ((double)__PYX_NAN());
__pyx_v_v100 = ((double)__PYX_NAN());
__pyx_v_z_top_ind = ((int)0xbad0bad0);
__pyx_v_v110 = ((double)__PYX_NAN());
__pyx_v_y_bot_ind = ((int)0xbad0bad0);
__pyx_v_v01 = ((double)__PYX_NAN());
__pyx_v_v101 = ((double)__PYX_NAN());
__pyx_v_zi = ((double)__PYX_NAN());
__pyx_v_v111 = ((double)__PYX_NAN());
__pyx_v_y_top_ind = ((int)0xbad0bad0);
__pyx_v_mid_ind = ((int)0xbad0bad0);
__pyx_v_v000 = ((double)__PYX_NAN());
__pyx_v_v001 = ((double)__PYX_NAN());
__pyx_v_v11 = ((double)__PYX_NAN());
__pyx_v_x_fac = ((double)__PYX_NAN());
/* "_interpolate3d.pyx":33
*
* for i in prange(n,nogil=True) :
* if n_x_vals > 0 : # <<<<<<<<<<<<<<
* xi = x[i]
* yi = y[i]
*/
__pyx_t_4 = ((__pyx_v_n_x_vals > 0) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":34
* for i in prange(n,nogil=True) :
* if n_x_vals > 0 :
* xi = x[i] # <<<<<<<<<<<<<<
* yi = y[i]
* zi = z[i]
*/
__pyx_t_5 = __pyx_v_i;
__pyx_v_xi = (*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_x.diminfo[0].strides));
goto __pyx_L10;
}
__pyx_L10:;
/* "_interpolate3d.pyx":35
* if n_x_vals > 0 :
* xi = x[i]
* yi = y[i] # <<<<<<<<<<<<<<
* zi = z[i]
*
*/
__pyx_t_6 = __pyx_v_i;
__pyx_v_yi = (*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_y.diminfo[0].strides));
/* "_interpolate3d.pyx":36
* xi = x[i]
* yi = y[i]
* zi = z[i] # <<<<<<<<<<<<<<
*
* if n_x_vals > 0 :
*/
__pyx_t_7 = __pyx_v_i;
__pyx_v_zi = (*__Pyx_BufPtrStrided1d(double *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_z.diminfo[0].strides));
/* "_interpolate3d.pyx":38
* zi = z[i]
*
* if n_x_vals > 0 : # <<<<<<<<<<<<<<
* # find x indices
* x_top_ind = n_x_vals - 1
*/
__pyx_t_4 = ((__pyx_v_n_x_vals > 0) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":40
* if n_x_vals > 0 :
* # find x indices
* x_top_ind = n_x_vals - 1 # <<<<<<<<<<<<<<
* x_bot_ind = 0
*
*/
__pyx_v_x_top_ind = (__pyx_v_n_x_vals - 1);
/* "_interpolate3d.pyx":41
* # find x indices
* x_top_ind = n_x_vals - 1
* x_bot_ind = 0 # <<<<<<<<<<<<<<
*
* while(x_top_ind > x_bot_ind + 1) :
*/
__pyx_v_x_bot_ind = 0;
/* "_interpolate3d.pyx":43
* x_bot_ind = 0
*
* while(x_top_ind > x_bot_ind + 1) : # <<<<<<<<<<<<<<
* mid_ind = floor((x_top_ind-x_bot_ind)/2)+x_bot_ind
* if (xi > x_vals[mid_ind]) :
*/
while (1) {
__pyx_t_4 = ((__pyx_v_x_top_ind > (__pyx_v_x_bot_ind + 1)) != 0);
if (!__pyx_t_4) break;
/* "_interpolate3d.pyx":44
*
* while(x_top_ind > x_bot_ind + 1) :
* mid_ind = floor((x_top_ind-x_bot_ind)/2)+x_bot_ind # <<<<<<<<<<<<<<
* if (xi > x_vals[mid_ind]) :
* x_bot_ind = mid_ind
*/
__pyx_v_mid_ind = (floor(__Pyx_div_long((__pyx_v_x_top_ind - __pyx_v_x_bot_ind), 2)) + __pyx_v_x_bot_ind);
/* "_interpolate3d.pyx":45
* while(x_top_ind > x_bot_ind + 1) :
* mid_ind = floor((x_top_ind-x_bot_ind)/2)+x_bot_ind
* if (xi > x_vals[mid_ind]) : # <<<<<<<<<<<<<<
* x_bot_ind = mid_ind
* else :
*/
__pyx_t_8 = __pyx_v_mid_ind;
__pyx_t_4 = ((__pyx_v_xi > (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.buf, __pyx_t_8, __pyx_pybuffernd_x_vals.diminfo[0].strides))) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":46
* mid_ind = floor((x_top_ind-x_bot_ind)/2)+x_bot_ind
* if (xi > x_vals[mid_ind]) :
* x_bot_ind = mid_ind # <<<<<<<<<<<<<<
* else :
* x_top_ind = mid_ind
*/
__pyx_v_x_bot_ind = __pyx_v_mid_ind;
goto __pyx_L14;
}
/*else*/ {
/* "_interpolate3d.pyx":48
* x_bot_ind = mid_ind
* else :
* x_top_ind = mid_ind # <<<<<<<<<<<<<<
*
* else :
*/
__pyx_v_x_top_ind = __pyx_v_mid_ind;
}
__pyx_L14:;
}
goto __pyx_L11;
}
/*else*/ {
/* "_interpolate3d.pyx":51
*
* else :
* x_top_ind = 0 # <<<<<<<<<<<<<<
* x_bot_ind = 0
*
*/
__pyx_v_x_top_ind = 0;
/* "_interpolate3d.pyx":52
* else :
* x_top_ind = 0
* x_bot_ind = 0 # <<<<<<<<<<<<<<
*
* # find y indices
*/
__pyx_v_x_bot_ind = 0;
}
__pyx_L11:;
/* "_interpolate3d.pyx":55
*
* # find y indices
* y_top_ind = n_y_vals - 1 # <<<<<<<<<<<<<<
* y_bot_ind = 0
*
*/
__pyx_v_y_top_ind = (__pyx_v_n_y_vals - 1);
/* "_interpolate3d.pyx":56
* # find y indices
* y_top_ind = n_y_vals - 1
* y_bot_ind = 0 # <<<<<<<<<<<<<<
*
* while(y_top_ind > y_bot_ind + 1) :
*/
__pyx_v_y_bot_ind = 0;
/* "_interpolate3d.pyx":58
* y_bot_ind = 0
*
* while(y_top_ind > y_bot_ind + 1) : # <<<<<<<<<<<<<<
* mid_ind = floor((y_top_ind-y_bot_ind)/2)+y_bot_ind
* if (yi > y_vals[mid_ind]) :
*/
while (1) {
__pyx_t_4 = ((__pyx_v_y_top_ind > (__pyx_v_y_bot_ind + 1)) != 0);
if (!__pyx_t_4) break;
/* "_interpolate3d.pyx":59
*
* while(y_top_ind > y_bot_ind + 1) :
* mid_ind = floor((y_top_ind-y_bot_ind)/2)+y_bot_ind # <<<<<<<<<<<<<<
* if (yi > y_vals[mid_ind]) :
* y_bot_ind = mid_ind
*/
__pyx_v_mid_ind = (floor(__Pyx_div_long((__pyx_v_y_top_ind - __pyx_v_y_bot_ind), 2)) + __pyx_v_y_bot_ind);
/* "_interpolate3d.pyx":60
* while(y_top_ind > y_bot_ind + 1) :
* mid_ind = floor((y_top_ind-y_bot_ind)/2)+y_bot_ind
* if (yi > y_vals[mid_ind]) : # <<<<<<<<<<<<<<
* y_bot_ind = mid_ind
* else :
*/
__pyx_t_9 = __pyx_v_mid_ind;
__pyx_t_4 = ((__pyx_v_yi > (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_y_vals.diminfo[0].strides))) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":61
* mid_ind = floor((y_top_ind-y_bot_ind)/2)+y_bot_ind
* if (yi > y_vals[mid_ind]) :
* y_bot_ind = mid_ind # <<<<<<<<<<<<<<
* else :
* y_top_ind = mid_ind
*/
__pyx_v_y_bot_ind = __pyx_v_mid_ind;
goto __pyx_L17;
}
/*else*/ {
/* "_interpolate3d.pyx":63
* y_bot_ind = mid_ind
* else :
* y_top_ind = mid_ind # <<<<<<<<<<<<<<
*
* # find z indices
*/
__pyx_v_y_top_ind = __pyx_v_mid_ind;
}
__pyx_L17:;
}
/* "_interpolate3d.pyx":66
*
* # find z indices
* z_top_ind = n_z_vals - 1 # <<<<<<<<<<<<<<
* z_bot_ind = 0
*
*/
__pyx_v_z_top_ind = (__pyx_v_n_z_vals - 1);
/* "_interpolate3d.pyx":67
* # find z indices
* z_top_ind = n_z_vals - 1
* z_bot_ind = 0 # <<<<<<<<<<<<<<
*
* while(z_top_ind > z_bot_ind + 1) :
*/
__pyx_v_z_bot_ind = 0;
/* "_interpolate3d.pyx":69
* z_bot_ind = 0
*
* while(z_top_ind > z_bot_ind + 1) : # <<<<<<<<<<<<<<
* mid_ind = floor((z_top_ind-z_bot_ind)/2)+z_bot_ind
* if (zi > z_vals[mid_ind]) :
*/
while (1) {
__pyx_t_4 = ((__pyx_v_z_top_ind > (__pyx_v_z_bot_ind + 1)) != 0);
if (!__pyx_t_4) break;
/* "_interpolate3d.pyx":70
*
* while(z_top_ind > z_bot_ind + 1) :
* mid_ind = floor((z_top_ind-z_bot_ind)/2)+z_bot_ind # <<<<<<<<<<<<<<
* if (zi > z_vals[mid_ind]) :
* z_bot_ind = mid_ind
*/
__pyx_v_mid_ind = (floor(__Pyx_div_long((__pyx_v_z_top_ind - __pyx_v_z_bot_ind), 2)) + __pyx_v_z_bot_ind);
/* "_interpolate3d.pyx":71
* while(z_top_ind > z_bot_ind + 1) :
* mid_ind = floor((z_top_ind-z_bot_ind)/2)+z_bot_ind
* if (zi > z_vals[mid_ind]) : # <<<<<<<<<<<<<<
* z_bot_ind = mid_ind
* else :
*/
__pyx_t_10 = __pyx_v_mid_ind;
__pyx_t_4 = ((__pyx_v_zi > (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_z_vals.diminfo[0].strides))) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":72
* mid_ind = floor((z_top_ind-z_bot_ind)/2)+z_bot_ind
* if (zi > z_vals[mid_ind]) :
* z_bot_ind = mid_ind # <<<<<<<<<<<<<<
* else :
* z_top_ind = mid_ind
*/
__pyx_v_z_bot_ind = __pyx_v_mid_ind;
goto __pyx_L20;
}
/*else*/ {
/* "_interpolate3d.pyx":74
* z_bot_ind = mid_ind
* else :
* z_top_ind = mid_ind # <<<<<<<<<<<<<<
*
* if n_x_vals > 0 :
*/
__pyx_v_z_top_ind = __pyx_v_mid_ind;
}
__pyx_L20:;
}
/* "_interpolate3d.pyx":76
* z_top_ind = mid_ind
*
* if n_x_vals > 0 : # <<<<<<<<<<<<<<
* x_fac = (xi - x_vals[x_bot_ind])/(x_vals[x_top_ind] - x_vals[x_bot_ind])
*
*/
__pyx_t_4 = ((__pyx_v_n_x_vals > 0) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":77
*
* if n_x_vals > 0 :
* x_fac = (xi - x_vals[x_bot_ind])/(x_vals[x_top_ind] - x_vals[x_bot_ind]) # <<<<<<<<<<<<<<
*
* y_fac = (yi - y_vals[y_bot_ind])/(y_vals[y_top_ind] - y_vals[y_bot_ind])
*/
__pyx_t_11 = __pyx_v_x_bot_ind;
__pyx_t_12 = (__pyx_v_xi - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_x_vals.diminfo[0].strides)));
__pyx_t_13 = __pyx_v_x_top_ind;
__pyx_t_14 = __pyx_v_x_bot_ind;
__pyx_t_15 = ((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_x_vals.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_x_vals.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x_vals.diminfo[0].strides)));
if (unlikely(__pyx_t_15 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L8_error;}
}
__pyx_v_x_fac = (__pyx_t_12 / __pyx_t_15);
goto __pyx_L21;
}
__pyx_L21:;
/* "_interpolate3d.pyx":79
* x_fac = (xi - x_vals[x_bot_ind])/(x_vals[x_top_ind] - x_vals[x_bot_ind])
*
* y_fac = (yi - y_vals[y_bot_ind])/(y_vals[y_top_ind] - y_vals[y_bot_ind]) # <<<<<<<<<<<<<<
* z_fac = (zi - z_vals[z_bot_ind])/(z_vals[z_top_ind] - z_vals[z_bot_ind])
*
*/
__pyx_t_16 = __pyx_v_y_bot_ind;
__pyx_t_15 = (__pyx_v_yi - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y_vals.diminfo[0].strides)));
__pyx_t_17 = __pyx_v_y_top_ind;
__pyx_t_18 = __pyx_v_y_bot_ind;
__pyx_t_12 = ((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_y_vals.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_y_vals.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y_vals.diminfo[0].strides)));
if (unlikely(__pyx_t_12 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L8_error;}
}
__pyx_v_y_fac = (__pyx_t_15 / __pyx_t_12);
/* "_interpolate3d.pyx":80
*
* y_fac = (yi - y_vals[y_bot_ind])/(y_vals[y_top_ind] - y_vals[y_bot_ind])
* z_fac = (zi - z_vals[z_bot_ind])/(z_vals[z_top_ind] - z_vals[z_bot_ind]) # <<<<<<<<<<<<<<
*
* # vertex values
*/
__pyx_t_19 = __pyx_v_z_bot_ind;
__pyx_t_12 = (__pyx_v_zi - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_z_vals.diminfo[0].strides)));
__pyx_t_20 = __pyx_v_z_top_ind;
__pyx_t_21 = __pyx_v_z_bot_ind;
__pyx_t_15 = ((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.buf, __pyx_t_20, __pyx_pybuffernd_z_vals.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_z_vals.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_z_vals.diminfo[0].strides)));
if (unlikely(__pyx_t_15 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L8_error;}
}
__pyx_v_z_fac = (__pyx_t_12 / __pyx_t_15);
/* "_interpolate3d.pyx":83
*
* # vertex values
* if n_x_vals > 0 : # <<<<<<<<<<<<<<
* v000 = vals[x_bot_ind,y_bot_ind,z_bot_ind]
* v001 = vals[x_bot_ind,y_bot_ind,z_top_ind]
*/
__pyx_t_4 = ((__pyx_v_n_x_vals > 0) != 0);
if (__pyx_t_4) {
/* "_interpolate3d.pyx":84
* # vertex values
* if n_x_vals > 0 :
* v000 = vals[x_bot_ind,y_bot_ind,z_bot_ind] # <<<<<<<<<<<<<<
* v001 = vals[x_bot_ind,y_bot_ind,z_top_ind]
* v010 = vals[x_bot_ind,y_top_ind,z_bot_ind]
*/
__pyx_t_22 = __pyx_v_x_bot_ind;
__pyx_t_23 = __pyx_v_y_bot_ind;
__pyx_t_24 = __pyx_v_z_bot_ind;
__pyx_v_v000 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_23, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_24, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":85
* if n_x_vals > 0 :
* v000 = vals[x_bot_ind,y_bot_ind,z_bot_ind]
* v001 = vals[x_bot_ind,y_bot_ind,z_top_ind] # <<<<<<<<<<<<<<
* v010 = vals[x_bot_ind,y_top_ind,z_bot_ind]
* v011 = vals[x_bot_ind,y_top_ind,z_top_ind]
*/
__pyx_t_25 = __pyx_v_x_bot_ind;
__pyx_t_26 = __pyx_v_y_bot_ind;
__pyx_t_27 = __pyx_v_z_top_ind;
__pyx_v_v001 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_25, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_26, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_27, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":86
* v000 = vals[x_bot_ind,y_bot_ind,z_bot_ind]
* v001 = vals[x_bot_ind,y_bot_ind,z_top_ind]
* v010 = vals[x_bot_ind,y_top_ind,z_bot_ind] # <<<<<<<<<<<<<<
* v011 = vals[x_bot_ind,y_top_ind,z_top_ind]
* v100 = vals[x_top_ind,y_bot_ind,z_bot_ind]
*/
__pyx_t_28 = __pyx_v_x_bot_ind;
__pyx_t_29 = __pyx_v_y_top_ind;
__pyx_t_30 = __pyx_v_z_bot_ind;
__pyx_v_v010 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_28, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_29, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_30, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":87
* v001 = vals[x_bot_ind,y_bot_ind,z_top_ind]
* v010 = vals[x_bot_ind,y_top_ind,z_bot_ind]
* v011 = vals[x_bot_ind,y_top_ind,z_top_ind] # <<<<<<<<<<<<<<
* v100 = vals[x_top_ind,y_bot_ind,z_bot_ind]
* v101 = vals[x_top_ind,y_bot_ind,z_top_ind]
*/
__pyx_t_31 = __pyx_v_x_bot_ind;
__pyx_t_32 = __pyx_v_y_top_ind;
__pyx_t_33 = __pyx_v_z_top_ind;
__pyx_v_v011 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_31, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_32, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_33, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":88
* v010 = vals[x_bot_ind,y_top_ind,z_bot_ind]
* v011 = vals[x_bot_ind,y_top_ind,z_top_ind]
* v100 = vals[x_top_ind,y_bot_ind,z_bot_ind] # <<<<<<<<<<<<<<
* v101 = vals[x_top_ind,y_bot_ind,z_top_ind]
* v110 = vals[x_top_ind,y_top_ind,z_bot_ind]
*/
__pyx_t_34 = __pyx_v_x_top_ind;
__pyx_t_35 = __pyx_v_y_bot_ind;
__pyx_t_36 = __pyx_v_z_bot_ind;
__pyx_v_v100 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_34, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_35, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_36, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":89
* v011 = vals[x_bot_ind,y_top_ind,z_top_ind]
* v100 = vals[x_top_ind,y_bot_ind,z_bot_ind]
* v101 = vals[x_top_ind,y_bot_ind,z_top_ind] # <<<<<<<<<<<<<<
* v110 = vals[x_top_ind,y_top_ind,z_bot_ind]
* v111 = vals[x_top_ind,y_top_ind,z_top_ind]
*/
__pyx_t_37 = __pyx_v_x_top_ind;
__pyx_t_38 = __pyx_v_y_bot_ind;
__pyx_t_39 = __pyx_v_z_top_ind;
__pyx_v_v101 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_37, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_38, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_39, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":90
* v100 = vals[x_top_ind,y_bot_ind,z_bot_ind]
* v101 = vals[x_top_ind,y_bot_ind,z_top_ind]
* v110 = vals[x_top_ind,y_top_ind,z_bot_ind] # <<<<<<<<<<<<<<
* v111 = vals[x_top_ind,y_top_ind,z_top_ind]
*
*/
__pyx_t_40 = __pyx_v_x_top_ind;
__pyx_t_41 = __pyx_v_y_top_ind;
__pyx_t_42 = __pyx_v_z_bot_ind;
__pyx_v_v110 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_40, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_41, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_42, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":91
* v101 = vals[x_top_ind,y_bot_ind,z_top_ind]
* v110 = vals[x_top_ind,y_top_ind,z_bot_ind]
* v111 = vals[x_top_ind,y_top_ind,z_top_ind] # <<<<<<<<<<<<<<
*
* v00 = v000*(1.0-x_fac) + v100*x_fac
*/
__pyx_t_43 = __pyx_v_x_top_ind;
__pyx_t_44 = __pyx_v_y_top_ind;
__pyx_t_45 = __pyx_v_z_top_ind;
__pyx_v_v111 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_43, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_44, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_45, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":93
* v111 = vals[x_top_ind,y_top_ind,z_top_ind]
*
* v00 = v000*(1.0-x_fac) + v100*x_fac # <<<<<<<<<<<<<<
* v10 = v010*(1.0-x_fac) + v110*x_fac
* v01 = v001*(1.0-x_fac) + v101*x_fac
*/
__pyx_v_v00 = ((__pyx_v_v000 * (1.0 - __pyx_v_x_fac)) + (__pyx_v_v100 * __pyx_v_x_fac));
/* "_interpolate3d.pyx":94
*
* v00 = v000*(1.0-x_fac) + v100*x_fac
* v10 = v010*(1.0-x_fac) + v110*x_fac # <<<<<<<<<<<<<<
* v01 = v001*(1.0-x_fac) + v101*x_fac
* v11 = v011*(1.0-x_fac) + v111*x_fac
*/
__pyx_v_v10 = ((__pyx_v_v010 * (1.0 - __pyx_v_x_fac)) + (__pyx_v_v110 * __pyx_v_x_fac));
/* "_interpolate3d.pyx":95
* v00 = v000*(1.0-x_fac) + v100*x_fac
* v10 = v010*(1.0-x_fac) + v110*x_fac
* v01 = v001*(1.0-x_fac) + v101*x_fac # <<<<<<<<<<<<<<
* v11 = v011*(1.0-x_fac) + v111*x_fac
*
*/
__pyx_v_v01 = ((__pyx_v_v001 * (1.0 - __pyx_v_x_fac)) + (__pyx_v_v101 * __pyx_v_x_fac));
/* "_interpolate3d.pyx":96
* v10 = v010*(1.0-x_fac) + v110*x_fac
* v01 = v001*(1.0-x_fac) + v101*x_fac
* v11 = v011*(1.0-x_fac) + v111*x_fac # <<<<<<<<<<<<<<
*
* else :
*/
__pyx_v_v11 = ((__pyx_v_v011 * (1.0 - __pyx_v_x_fac)) + (__pyx_v_v111 * __pyx_v_x_fac));
goto __pyx_L22;
}
/*else*/ {
/* "_interpolate3d.pyx":99
*
* else :
* v00 = vals[0, y_bot_ind, z_bot_ind] # <<<<<<<<<<<<<<
* v01 = vals[0, y_bot_ind, z_top_ind]
* v10 = vals[0, y_top_ind, z_bot_ind]
*/
__pyx_t_46 = 0;
__pyx_t_47 = __pyx_v_y_bot_ind;
__pyx_t_48 = __pyx_v_z_bot_ind;
__pyx_v_v00 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_46, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_47, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_48, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":100
* else :
* v00 = vals[0, y_bot_ind, z_bot_ind]
* v01 = vals[0, y_bot_ind, z_top_ind] # <<<<<<<<<<<<<<
* v10 = vals[0, y_top_ind, z_bot_ind]
* v11 = vals[0, y_top_ind, z_top_ind]
*/
__pyx_t_49 = 0;
__pyx_t_50 = __pyx_v_y_bot_ind;
__pyx_t_51 = __pyx_v_z_top_ind;
__pyx_v_v01 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_49, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_50, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_51, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":101
* v00 = vals[0, y_bot_ind, z_bot_ind]
* v01 = vals[0, y_bot_ind, z_top_ind]
* v10 = vals[0, y_top_ind, z_bot_ind] # <<<<<<<<<<<<<<
* v11 = vals[0, y_top_ind, z_top_ind]
*
*/
__pyx_t_52 = 0;
__pyx_t_53 = __pyx_v_y_top_ind;
__pyx_t_54 = __pyx_v_z_bot_ind;
__pyx_v_v10 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_52, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_53, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_54, __pyx_pybuffernd_vals.diminfo[2].strides));
/* "_interpolate3d.pyx":102
* v01 = vals[0, y_bot_ind, z_top_ind]
* v10 = vals[0, y_top_ind, z_bot_ind]
* v11 = vals[0, y_top_ind, z_top_ind] # <<<<<<<<<<<<<<
*
* v0 = v00*(1.0-y_fac) + v10*y_fac
*/
__pyx_t_55 = 0;
__pyx_t_56 = __pyx_v_y_top_ind;
__pyx_t_57 = __pyx_v_z_top_ind;
__pyx_v_v11 = (*__Pyx_BufPtrStrided3d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_vals.rcbuffer->pybuffer.buf, __pyx_t_55, __pyx_pybuffernd_vals.diminfo[0].strides, __pyx_t_56, __pyx_pybuffernd_vals.diminfo[1].strides, __pyx_t_57, __pyx_pybuffernd_vals.diminfo[2].strides));
}
__pyx_L22:;
/* "_interpolate3d.pyx":104
* v11 = vals[0, y_top_ind, z_top_ind]
*
* v0 = v00*(1.0-y_fac) + v10*y_fac # <<<<<<<<<<<<<<
* v1 = v01*(1.0-y_fac) + v11*y_fac
*
*/
__pyx_v_v0 = ((__pyx_v_v00 * (1.0 - __pyx_v_y_fac)) + (__pyx_v_v10 * __pyx_v_y_fac));
/* "_interpolate3d.pyx":105
*
* v0 = v00*(1.0-y_fac) + v10*y_fac
* v1 = v01*(1.0-y_fac) + v11*y_fac # <<<<<<<<<<<<<<
*
* result_array[i] = v0*(1-z_fac) + v1*z_fac
*/
__pyx_v_v1 = ((__pyx_v_v01 * (1.0 - __pyx_v_y_fac)) + (__pyx_v_v11 * __pyx_v_y_fac));
/* "_interpolate3d.pyx":107
* v1 = v01*(1.0-y_fac) + v11*y_fac
*
* result_array[i] = v0*(1-z_fac) + v1*z_fac # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_58 = __pyx_v_i;
*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_result_array.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_result_array.diminfo[0].strides) = ((__pyx_v_v0 * (1.0 - __pyx_v_z_fac)) + (__pyx_v_v1 * __pyx_v_z_fac));
goto __pyx_L24;
__pyx_L8_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L23;
__pyx_L23:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates1)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_y_fac;
__pyx_parallel_temp1 = __pyx_v_z_fac;
__pyx_parallel_temp2 = __pyx_v_v10;
__pyx_parallel_temp3 = __pyx_v_v010;
__pyx_parallel_temp4 = __pyx_v_z_bot_ind;
__pyx_parallel_temp5 = __pyx_v_yi;
__pyx_parallel_temp6 = __pyx_v_v1;
__pyx_parallel_temp7 = __pyx_v_x_bot_ind;
__pyx_parallel_temp8 = __pyx_v_v0;
__pyx_parallel_temp9 = __pyx_v_x_top_ind;
__pyx_parallel_temp10 = __pyx_v_xi;
__pyx_parallel_temp11 = __pyx_v_v011;
__pyx_parallel_temp12 = __pyx_v_v00;
__pyx_parallel_temp13 = __pyx_v_v100;
__pyx_parallel_temp14 = __pyx_v_z_top_ind;
__pyx_parallel_temp15 = __pyx_v_v110;
__pyx_parallel_temp16 = __pyx_v_y_bot_ind;
__pyx_parallel_temp17 = __pyx_v_v01;
__pyx_parallel_temp18 = __pyx_v_v101;
__pyx_parallel_temp19 = __pyx_v_zi;
__pyx_parallel_temp20 = __pyx_v_v111;
__pyx_parallel_temp21 = __pyx_v_y_top_ind;
__pyx_parallel_temp22 = __pyx_v_mid_ind;
__pyx_parallel_temp23 = __pyx_v_v000;
__pyx_parallel_temp24 = __pyx_v_v001;
__pyx_parallel_temp25 = __pyx_v_i;
__pyx_parallel_temp26 = __pyx_v_v11;
__pyx_parallel_temp27 = __pyx_v_x_fac;
}
__pyx_L24:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_y_fac = __pyx_parallel_temp0;
__pyx_v_z_fac = __pyx_parallel_temp1;
__pyx_v_v10 = __pyx_parallel_temp2;
__pyx_v_v010 = __pyx_parallel_temp3;
__pyx_v_z_bot_ind = __pyx_parallel_temp4;
__pyx_v_yi = __pyx_parallel_temp5;
__pyx_v_v1 = __pyx_parallel_temp6;
__pyx_v_x_bot_ind = __pyx_parallel_temp7;
__pyx_v_v0 = __pyx_parallel_temp8;
__pyx_v_x_top_ind = __pyx_parallel_temp9;
__pyx_v_xi = __pyx_parallel_temp10;
__pyx_v_v011 = __pyx_parallel_temp11;
__pyx_v_v00 = __pyx_parallel_temp12;
__pyx_v_v100 = __pyx_parallel_temp13;
__pyx_v_z_top_ind = __pyx_parallel_temp14;
__pyx_v_v110 = __pyx_parallel_temp15;
__pyx_v_y_bot_ind = __pyx_parallel_temp16;
__pyx_v_v01 = __pyx_parallel_temp17;
__pyx_v_v101 = __pyx_parallel_temp18;
__pyx_v_zi = __pyx_parallel_temp19;
__pyx_v_v111 = __pyx_parallel_temp20;
__pyx_v_y_top_ind = __pyx_parallel_temp21;
__pyx_v_mid_ind = __pyx_parallel_temp22;
__pyx_v_v000 = __pyx_parallel_temp23;
__pyx_v_v001 = __pyx_parallel_temp24;
__pyx_v_i = __pyx_parallel_temp25;
__pyx_v_v11 = __pyx_parallel_temp26;
__pyx_v_x_fac = __pyx_parallel_temp27;
switch (__pyx_parallel_why) {
case 3: goto __pyx_L3_return;
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L4_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "_interpolate3d.pyx":32
* cdef Py_ssize_t i
*
* for i in prange(n,nogil=True) : # <<<<<<<<<<<<<<
* if n_x_vals > 0 :
* xi = x[i]
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L3_return: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L0;
}
__pyx_L4_error: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L5:;
}
}
/* "_interpolate3d.pyx":14
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def interpolate3d(int n, # <<<<<<<<<<<<<<
* np.ndarray[floating,ndim=1] x,
* np.ndarray[floating,ndim=1] y,
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result_array.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z_vals.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("_interpolate3d.interpolate3d", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_result_array.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_vals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z_vals.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "_interpolate3d.pyx":113
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef int bisect(np.float64_t x, int nval, np.float64_t [:] arr) nogil: # <<<<<<<<<<<<<<
* cdef int mid, top, bot
* top = nval - 1
*/
/* Cython-generated C for the nogil cdef helper `bisect` (see .pyx lines
 * 113-124, quoted in the comment blocks below).
 *
 * Binary search over the 1-D float64 memoryview `arr` of length `nval`
 * (assumes arr is sorted ascending -- invariant expected from the caller;
 * not checked here since the .pyx uses @cython.boundscheck(False)).
 * Returns the lower bracketing index `bot` such that, on exit,
 * top == bot + 1 and the loop has maintained arr[bot] < x <= arr[top]
 * for probes it examined.  If x <= arr[1] (or nval <= 2) the loop body
 * never moves bot, so 0 is returned; if x > arr[nval-2] the result is
 * nval - 2.  No bounds or NULL checks are performed on `arr`.
 *
 * NOTE(review): machine-generated code -- fix bugs in _interpolate3d.pyx
 * and regenerate; do not hand-edit this translation unit. */
static int __pyx_f_14_interpolate3d_bisect(__pyx_t_5numpy_float64_t __pyx_v_x, int __pyx_v_nval, __Pyx_memviewslice __pyx_v_arr) {
int __pyx_v_mid;  /* midpoint probe index */
int __pyx_v_top;  /* upper edge of the search bracket */
int __pyx_v_bot;  /* lower edge of the search bracket (the return value) */
int __pyx_r;      /* function result slot */
int __pyx_t_1;    /* scratch: loop/branch condition */
int __pyx_t_2;    /* scratch: index temp for the arr[mid] load */
/* "_interpolate3d.pyx":115
 * cdef int bisect(np.float64_t x, int nval, np.float64_t [:] arr) nogil:
 * cdef int mid, top, bot
 * top = nval - 1 # <<<<<<<<<<<<<<
 * bot = 0
 *
 */
__pyx_v_top = (__pyx_v_nval - 1);
/* "_interpolate3d.pyx":116
 * cdef int mid, top, bot
 * top = nval - 1
 * bot = 0 # <<<<<<<<<<<<<<
 *
 * while(top > bot + 1) :
 */
__pyx_v_bot = 0;
/* "_interpolate3d.pyx":118
 * bot = 0
 *
 * while(top > bot + 1) : # <<<<<<<<<<<<<<
 * mid = floor((top-bot)/2)+bot
 * if (x > arr[mid]) :
 */
/* Narrow [bot, top] until the bracket is a single interval (top == bot+1). */
while (1) {
__pyx_t_1 = ((__pyx_v_top > (__pyx_v_bot + 1)) != 0);
if (!__pyx_t_1) break;
/* "_interpolate3d.pyx":119
 *
 * while(top > bot + 1) :
 * mid = floor((top-bot)/2)+bot # <<<<<<<<<<<<<<
 * if (x > arr[mid]) :
 * bot = mid
 */
/* __Pyx_div_long implements Python floor-division semantics; operands are
 * non-negative here, and top > bot + 1 guarantees bot < mid < top, so the
 * loop strictly shrinks the bracket and must terminate.  The floor() call
 * is redundant on an integral value but harmless (comes from the .pyx). */
__pyx_v_mid = (floor(__Pyx_div_long((__pyx_v_top - __pyx_v_bot), 2)) + __pyx_v_bot);
/* "_interpolate3d.pyx":120
 * while(top > bot + 1) :
 * mid = floor((top-bot)/2)+bot
 * if (x > arr[mid]) : # <<<<<<<<<<<<<<
 * bot = mid
 * else :
 */
/* Strided load of arr[mid] from the raw memoryview slice (no bounds check). */
__pyx_t_2 = __pyx_v_mid;
__pyx_t_1 = ((__pyx_v_x > (*((__pyx_t_5numpy_float64_t *) ( /* dim=0 */ (__pyx_v_arr.data + __pyx_t_2 * __pyx_v_arr.strides[0]) )))) != 0);
if (__pyx_t_1) {
/* "_interpolate3d.pyx":121
 * mid = floor((top-bot)/2)+bot
 * if (x > arr[mid]) :
 * bot = mid # <<<<<<<<<<<<<<
 * else :
 * top = mid
 */
/* x lies above arr[mid]: keep the upper half. */
__pyx_v_bot = __pyx_v_mid;
goto __pyx_L5;
}
/*else*/ {
/* "_interpolate3d.pyx":123
 * bot = mid
 * else :
 * top = mid # <<<<<<<<<<<<<<
 * return bot
 */
/* x <= arr[mid]: keep the lower half (ties move top down). */
__pyx_v_top = __pyx_v_mid;
}
__pyx_L5:;
}
/* "_interpolate3d.pyx":124
 * else :
 * top = mid
 * return bot # <<<<<<<<<<<<<<
 */
__pyx_r = __pyx_v_bot;
goto __pyx_L0;
/* "_interpolate3d.pyx":113
 * @cython.boundscheck(False)
 * @cython.wraparound(False)
 * cdef int bisect(np.float64_t x, int nval, np.float64_t [:] arr) nogil: # <<<<<<<<<<<<<<
 * cdef int mid, top, bot
 * top = nval - 1
 */
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":197
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fullfill the PEP.
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* Cython-generated Python-level wrapper for numpy's ndarray.__getbuffer__
 * (PEP 3118 buffer protocol fill-in from Cython/Includes/numpy/__init__.pxd).
 * It only casts the generic PyObject* receiver to PyArrayObject* and forwards
 * all three arguments unchanged to the implementation function
 * __pyx_pf_5numpy_7ndarray___getbuffer__ defined later in this file,
 * returning its result verbatim.  The RefNanny calls are debug-build
 * reference-count bookkeeping and are no-ops otherwise.
 * NOTE(review): machine-generated -- regenerate from the .pyx, do not edit. */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;  /* 0 on success, -1 on failure (buffer-protocol convention) */
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of numpy.ndarray.__getbuffer__ (PEP 3118 buffer
 * protocol), generated by Cython 0.22 from
 * Cython/Includes/numpy/__init__.pxd:197.  Fills *info with the array's
 * data pointer, ndim, shape, strides, itemsize, readonly flag and a
 * struct-module format string.  Returns 0 on success, -1 with a Python
 * exception set on failure (non-contiguous when contiguity was
 * requested, non-native byte order, or unknown dtype).
 * NOTE(review): machine-generated -- fix issues in the .pxd/.pyx source
 * and regenerate rather than editing this file. */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-set info->obj to None so the exit paths (__pyx_L0/__pyx_L1_error)
 * can tell whether a real owner was ever installed. */
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "__init__.pxd":203 -- if info == NULL: return */
__pyx_t_1 = ((__pyx_v_info == NULL) != 0);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
}
/* Runtime endianness probe: first byte of an int holding 1 is nonzero
 * exactly on little-endian hosts. */
__pyx_v_endian_detector = 1;
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* If npy_intp and Py_ssize_t differ in size, shape/strides must be
 * copied element-wise into a fresh block instead of aliased. */
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
__pyx_v_copy_shape = 1;
goto __pyx_L4;
}
/*else*/ {
__pyx_v_copy_shape = 0;
}
__pyx_L4:;
/* Raise ValueError if the consumer demands C contiguity the array
 * does not have ("__init__.pxd":216-218). */
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L6_bool_binop_done:;
if (__pyx_t_1) {
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* Same check for Fortran contiguity ("__init__.pxd":220-222). */
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L9_bool_binop_done:;
if (__pyx_t_1) {
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* Fill the basic buffer fields. */
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
__pyx_v_info->ndim = __pyx_v_ndim;
__pyx_t_1 = (__pyx_v_copy_shape != 0);
if (__pyx_t_1) {
/* One malloc'd block, strides first then shape; freed later by
 * __releasebuffer__ via free(info->strides). */
__pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
__pyx_t_4 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
goto __pyx_L11;
}
/*else*/ {
/* Sizes match: alias NumPy's own strides/dims arrays directly. */
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L11:;
__pyx_v_info->suboffsets = NULL;
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
__pyx_v_f = NULL;
/* descr = self.descr (owned reference, released at __pyx_L2). */
__pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* If nothing was allocated (no fields, no copied shape), keep
 * info->obj = None so __releasebuffer__ is not invoked; otherwise set
 * info->obj = self so cleanup runs. */
__pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L15_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L15_bool_binop_done:;
if (__pyx_t_1) {
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
goto __pyx_L14;
}
/*else*/ {
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
}
__pyx_L14:;
/* Simple (non-structured) dtype: map the numpy type number to a
 * static struct-module format string. */
__pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_1) {
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* Refuse explicitly non-native byte orders:
 * (byteorder == '>' and little_endian) or
 * (byteorder == '<' and not little_endian). */
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L20_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_L20_next_or:;
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
if (__pyx_t_1) {
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* dtype number -> PEP 3118 format character ("b", "B", ..., "Zg", "O"). */
switch (__pyx_v_t) {
case NPY_BYTE:
__pyx_v_f = __pyx_k_b;
break;
case NPY_UBYTE:
__pyx_v_f = __pyx_k_B;
break;
case NPY_SHORT:
__pyx_v_f = __pyx_k_h;
break;
case NPY_USHORT:
__pyx_v_f = __pyx_k_H;
break;
case NPY_INT:
__pyx_v_f = __pyx_k_i;
break;
case NPY_UINT:
__pyx_v_f = __pyx_k_I;
break;
case NPY_LONG:
__pyx_v_f = __pyx_k_l;
break;
case NPY_ULONG:
__pyx_v_f = __pyx_k_L;
break;
case NPY_LONGLONG:
__pyx_v_f = __pyx_k_q;
break;
case NPY_ULONGLONG:
__pyx_v_f = __pyx_k_Q;
break;
case NPY_FLOAT:
__pyx_v_f = __pyx_k_f;
break;
case NPY_DOUBLE:
__pyx_v_f = __pyx_k_d;
break;
case NPY_LONGDOUBLE:
__pyx_v_f = __pyx_k_g;
break;
case NPY_CFLOAT:
__pyx_v_f = __pyx_k_Zf;
break;
case NPY_CDOUBLE:
__pyx_v_f = __pyx_k_Zd;
break;
case NPY_CLONGDOUBLE:
__pyx_v_f = __pyx_k_Zg;
break;
case NPY_OBJECT:
__pyx_v_f = __pyx_k_O;
break;
default:
/* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
break;
}
__pyx_v_info->format = __pyx_v_f;
__pyx_r = 0;
goto __pyx_L0;
}
/*else*/ {
/* Structured dtype: build the format string into a malloc'd buffer
 * (255 == _buffer_format_string_len; freed by __releasebuffer__).
 * Leading '^' = native byte order, manual alignment. */
__pyx_v_info->format = ((char *)malloc(255));
(__pyx_v_info->format[0]) = '^';
__pyx_v_offset = 0;
__pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_7;
/* NUL-terminate the format string. */
(__pyx_v_f[0]) = '\x00';
}
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
/* On error, drop whatever owner reference was installed in info->obj. */
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
/* On success with no cleanup needed, clear the placeholder None. */
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":291
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* Python wrapper for numpy.ndarray.__releasebuffer__ (buffer-protocol
 * slot).  Casts arguments and forwards to the implementation below.
 * NOTE(review): Cython-generated -- do not edit by hand. */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Implementation of numpy.ndarray.__releasebuffer__: frees exactly what
 * __getbuffer__ malloc'd -- the format string (only for structured
 * dtypes, i.e. PyArray_HASFIELDS) and the strides/shape block (only
 * when sizeof(npy_intp) != sizeof(Py_ssize_t), the same condition under
 * which __getbuffer__ copied them instead of aliasing). */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* free(info->format) iff __getbuffer__ allocated it. */
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
free(__pyx_v_info->format);
goto __pyx_L3;
}
__pyx_L3:;
/* info->shape lives in the same malloc'd block right after strides,
 * so freeing strides releases both. */
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
free(__pyx_v_info->strides);
goto __pyx_L4;
}
__pyx_L4:;
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Cython glue for numpy/__init__.pxd:771
 *   cdef inline object PyArray_MultiIterNew1(a):
 *       return PyArray_MultiIterNew(1, <void*>a)
 * Returns a new reference to the broadcast iterator, or NULL after
 * appending a traceback frame when PyArray_MultiIterNew fails. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
  {
    PyObject *multi_iter = PyArray_MultiIterNew(1, ((void *)__pyx_v_a));
    if (likely(multi_iter)) {
      __Pyx_GOTREF(multi_iter);
      /* ownership of the new reference passes to the caller */
      __pyx_r = multi_iter;
    } else {
      /* error already set; record where it happened (pxd line 772) */
      __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __LINE__, 772, __pyx_f[1]);
      __pyx_r = 0;
    }
  }
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython glue for numpy/__init__.pxd:774
 *   cdef inline object PyArray_MultiIterNew2(a, b):
 *       return PyArray_MultiIterNew(2, <void*>a, <void*>b)
 * Returns a new reference to the broadcast iterator, or NULL after
 * appending a traceback frame when PyArray_MultiIterNew fails. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
  {
    PyObject *multi_iter = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b));
    if (likely(multi_iter)) {
      __Pyx_GOTREF(multi_iter);
      /* ownership of the new reference passes to the caller */
      __pyx_r = multi_iter;
    } else {
      /* error already set; record where it happened (pxd line 775) */
      __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __LINE__, 775, __pyx_f[1]);
      __pyx_r = 0;
    }
  }
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython glue for numpy/__init__.pxd:777
 *   cdef inline object PyArray_MultiIterNew3(a, b, c):
 *       return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
 * Returns a new reference to the broadcast iterator, or NULL after
 * appending a traceback frame when PyArray_MultiIterNew fails. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
  {
    PyObject *multi_iter = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c));
    if (likely(multi_iter)) {
      __Pyx_GOTREF(multi_iter);
      /* ownership of the new reference passes to the caller */
      __pyx_r = multi_iter;
    } else {
      /* error already set; record where it happened (pxd line 778) */
      __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __LINE__, 778, __pyx_f[1]);
      __pyx_r = 0;
    }
  }
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython glue for numpy/__init__.pxd:780
 *   cdef inline object PyArray_MultiIterNew4(a, b, c, d):
 *       return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
 * Returns a new reference to the broadcast iterator, or NULL after
 * appending a traceback frame when PyArray_MultiIterNew fails. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
  {
    PyObject *multi_iter = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d));
    if (likely(multi_iter)) {
      __Pyx_GOTREF(multi_iter);
      /* ownership of the new reference passes to the caller */
      __pyx_r = multi_iter;
    } else {
      /* error already set; record where it happened (pxd line 781) */
      __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __LINE__, 781, __pyx_f[1]);
      __pyx_r = 0;
    }
  }
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython glue for numpy/__init__.pxd:783
 *   cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
 *       return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
 * Returns a new reference to the broadcast iterator, or NULL after
 * appending a traceback frame when PyArray_MultiIterNew fails. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
  {
    PyObject *multi_iter = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e));
    if (likely(multi_iter)) {
      __Pyx_GOTREF(multi_iter);
      /* ownership of the new reference passes to the caller */
      __pyx_r = multi_iter;
    } else {
      /* error already set; record where it happened (pxd line 784) */
      __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __LINE__, 784, __pyx_f[1]);
      __pyx_r = 0;
    }
  }
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":786
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":793
* cdef int delta_offset
* cdef tuple i
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple i
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":797
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":798
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
if (unlikely(__pyx_v_descr->fields == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":799
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":801
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (__pyx_t_6) {
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":802
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":804
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) {
goto __pyx_L8_next_or;
} else {
}
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":805
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
if (__pyx_t_6) {
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":806
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":816
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":817
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 120;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":818
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":819
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":821
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":823
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":824
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":825
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (__pyx_t_6) {
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":826
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":829
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":830
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":831
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 104;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":832
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":833
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 105;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":834
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":835
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 108;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":836
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":837
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 113;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":838
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":839
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 102;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":840
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 100;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":841
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 103;
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":842
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 102;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":843
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 100;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":844
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 103;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":845
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L15;
}
/*else*/ {
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":847
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
__pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L15:;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":848
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L13;
}
/*else*/ {
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":852
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":797
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":853
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":786
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":969
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* Cython-generated (Cython 0.22) translation of numpy/__init__.pxd:969
 * `set_array_base(ndarray arr, object base)`.
 *
 * Installs `base` as the base object of `arr` (the object that owns the
 * array's data buffer).  Reference semantics, as implemented below:
 *   - if base is None, arr->base is set to NULL (NumPy's "no base");
 *   - otherwise a new strong reference to `base` is taken FIRST, and only
 *     then is the previous arr->base reference dropped, so the operation
 *     is safe even when the new base is the same object as the old one.
 *
 * Do not edit by hand: this file is generated; regenerate with Cython. */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
  PyObject *__pyx_v_baseptr;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  __Pyx_RefNannySetupContext("set_array_base", 0);
  /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":971
 * cdef inline void set_array_base(ndarray arr, object base):
 *     cdef PyObject* baseptr
 *     if base is None:             # <<<<<<<<<<<<<<
 *         baseptr = NULL
 *     else:
 */
  __pyx_t_1 = (__pyx_v_base == Py_None);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":972
 *     cdef PyObject* baseptr
 *     if base is None:
 *         baseptr = NULL             # <<<<<<<<<<<<<<
 *     else:
 *         Py_INCREF(base) # important to do this before decref below!
 */
    /* None base: NumPy represents "no base object" as a NULL pointer. */
    __pyx_v_baseptr = NULL;
    goto __pyx_L3;
  }
  /*else*/ {
    /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":974
 *         baseptr = NULL
 *     else:
 *         Py_INCREF(base) # important to do this before decref below!             # <<<<<<<<<<<<<<
 *         baseptr = <PyObject*>base
 *         Py_XDECREF(arr.base)
 */
    /* Take the new reference BEFORE dropping the old one below; this keeps
     * `base` alive even if arr->base was its only remaining reference. */
    Py_INCREF(__pyx_v_base);
    /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":975
 *     else:
 *         Py_INCREF(base) # important to do this before decref below!
 *         baseptr = <PyObject*>base             # <<<<<<<<<<<<<<
 *         Py_XDECREF(arr.base)
 *         arr.base = baseptr
 */
    __pyx_v_baseptr = ((PyObject *)__pyx_v_base);
  }
  __pyx_L3:;
  /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":976
 *         Py_INCREF(base) # important to do this before decref below!
 *         baseptr = <PyObject*>base
 *         Py_XDECREF(arr.base)             # <<<<<<<<<<<<<<
 *         arr.base = baseptr
 * 
 */
  /* Drop the reference to the previous base, if any (XDECREF tolerates NULL). */
  Py_XDECREF(__pyx_v_arr->base);
  /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":977
 *         baseptr = <PyObject*>base
 *         Py_XDECREF(arr.base)
 *         arr.base = baseptr             # <<<<<<<<<<<<<<
 * 
 * cdef inline object get_array_base(ndarray arr):
 */
  __pyx_v_arr->base = __pyx_v_baseptr;
  /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":969
 * 
 * 
 * cdef inline void set_array_base(ndarray arr, object base):             # <<<<<<<<<<<<<<
 *     cdef PyObject* baseptr
 *     if base is None:
 */
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":979
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/* Cython-generated (Cython 0.22) translation of numpy/__init__.pxd:979
 * `get_array_base(ndarray arr)`.
 *
 * Returns the base object of `arr` as a NEW reference (the stored
 * arr->base pointer is INCREF'd before being returned), or Py_None when
 * arr->base is NULL.  Counterpart to set_array_base above.
 *
 * Do not edit by hand: this file is generated; regenerate with Cython. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("get_array_base", 0);
  /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":980
 * 
 * cdef inline object get_array_base(ndarray arr):
 *     if arr.base is NULL:             # <<<<<<<<<<<<<<
 *         return None
 *     else:
 */
  __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
  if (__pyx_t_1) {
    /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":981
 * 
 * cdef inline object get_array_base(ndarray arr):
 *     if arr.base is NULL:
 *         return None             # <<<<<<<<<<<<<<
 *     else:
 *         return <object>arr.base
 */
    /* No base stored: return None (new reference, per CPython convention). */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_None);
    __pyx_r = Py_None;
    goto __pyx_L0;
  }
  /*else*/ {
    /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":983
 *         return None
 *     else:
 *         return <object>arr.base             # <<<<<<<<<<<<<<
 */
    /* Promote the borrowed arr->base pointer to a new owned reference. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
    __pyx_r = ((PyObject *)__pyx_v_arr->base);
    goto __pyx_L0;
  }
  /* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":979
 *         arr.base = baseptr
 * 
 * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
 *     if arr.base is NULL:
 *         return None
 */
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":116
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
/* Cython-generated (Cython 0.22) Python-level wrapper for
 * View.MemoryView `array.__cinit__(self, tuple shape, Py_ssize_t itemsize,
 * format not None, mode="c", bint allocate_buffer=True)`.
 *
 * Unpacks positional and keyword arguments (3 required, 5 max), applies
 * the defaults mode="c" and allocate_buffer=True, type-checks `shape`
 * (must be a tuple) and `format` (must not be None), then dispatches to
 * the generated implementation function.  Returns 0 on success, -1 with
 * an exception set on failure.
 *
 * NOTE: the switch statements below use deliberate case fallthrough to
 * unpack "the first K arguments" — do not add break statements.
 * Do not edit by hand: this file is generated; regenerate with Cython. */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_shape = 0;
  Py_ssize_t __pyx_v_itemsize;
  PyObject *__pyx_v_format = 0;
  PyObject *__pyx_v_mode = 0;
  int __pyx_v_allocate_buffer;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  {
    /* Keyword-name table, parallel to the values[] slots below. */
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
    PyObject* values[5] = {0,0,0,0,0};
    /* Pre-load the default for `mode` ("c"); allocate_buffer's default is
     * applied later because it needs conversion to C int. */
    values[3] = ((PyObject *)__pyx_n_s_c);
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Copy however many positional args were given; intentional
       * fallthrough unpacks args (pos_args-1) .. 0. */
      switch (pos_args) {
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); /* fall through */
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); /* fall through */
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); /* fall through */
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); /* fall through */
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); /* fall through */
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Fill the remaining slots from keywords; again intentional
       * fallthrough — starting at the first slot NOT covered by a
       * positional argument.  Missing required args raise here. */
      switch (pos_args) {
        case  0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        /* fall through */
        case  1:
        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        /* fall through */
        case  2:
        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        /* fall through */
        case  3:
        if (kw_args > 0) {
          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode);
          if (value) { values[3] = value; kw_args--; }
        }
        /* fall through */
        case  4:
        if (kw_args > 0) {
          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer);
          if (value) { values[4] = value; kw_args--; }
        }
      }
      /* Any keywords still unconsumed are unexpected/duplicate: report. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else {
      /* No keywords: positional-only fast path (3 to 5 args accepted). */
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); /* fall through */
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); /* fall through */
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert the collected PyObject* slots into typed C locals. */
    __pyx_v_shape = ((PyObject*)values[0]);
    __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_format = values[2];
    __pyx_v_mode = values[3];
    if (values[4]) {
      __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    } else {
      /* "View.MemoryView":117
 * 
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
 *              mode="c", bint allocate_buffer=True):             # <<<<<<<<<<<<<<
 * 
 *         cdef int idx
 */
      /* Default when allocate_buffer was not supplied. */
      __pyx_v_allocate_buffer = ((int)1);
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  /* Wrong number of positional arguments (expected 3..5). */
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  /* Declared argument types: shape must be a tuple, format must not be None. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
    PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  /* All arguments validated: call the generated implementation. */
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
  /* "View.MemoryView":116
 *     cdef bint dtype_is_object
 * 
 *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,             # <<<<<<<<<<<<<<
 *                  mode="c", bint allocate_buffer=True):
 * 
 */
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":123
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":124
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":126
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":127
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":129
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":130
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if isinstance(format, unicode):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":132
* raise ValueError("itemsize <= 0 for cython.array")
*
* if isinstance(format, unicode): # <<<<<<<<<<<<<<
* format = (<unicode>format).encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyUnicode_Check(__pyx_v_format);
__pyx_t_4 = (__pyx_t_2 != 0);
if (__pyx_t_4) {
/* "View.MemoryView":133
*
* if isinstance(format, unicode):
* format = (<unicode>format).encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
if (unlikely(__pyx_v_format == Py_None)) {
PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%s'", "encode");
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_3 = PyUnicode_AsASCIIString(((PyObject*)__pyx_v_format)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L5;
}
__pyx_L5:;
/* "View.MemoryView":134
* if isinstance(format, unicode):
* format = (<unicode>format).encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":135
* format = (<unicode>format).encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_self->format = __pyx_t_5;
/* "View.MemoryView":138
*
*
* self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyMem_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":139
*
* self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":141
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":142
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":145
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_6 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_7); __pyx_t_1++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
__pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_7); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_v_dim = __pyx_t_8;
__pyx_v_idx = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":146
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":147
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_10);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_9);
__pyx_t_7 = 0;
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_10);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__Pyx_Raise(__pyx_t_9, 0, 0, 0);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":148
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":145
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":151
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":152
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":153
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
goto __pyx_L10;
}
/* "View.MemoryView":154
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":155
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":156
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
goto __pyx_L10;
}
/*else*/ {
/* "View.MemoryView":158
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L10:;
/* "View.MemoryView":160
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":163
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":164
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_3 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":165
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":168
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":169
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":170
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":172
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":173
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":174
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) {
__pyx_v_i = __pyx_t_8;
/* "View.MemoryView":175
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":176
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
goto __pyx_L13;
}
__pyx_L13:;
goto __pyx_L11;
}
__pyx_L11:;
/* "View.MemoryView":116
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":179
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
/* Buffer-protocol entry point for the Cython `array` type: casts the
 * generic PyObject* self to the concrete struct __pyx_array_obj and
 * forwards to the real __getbuffer__ implementation below.  Returns 0
 * on success, -1 on error (standard bf_getbuffer contract). */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__getbuffer__ (PEP 3118 buffer protocol).
 * Selects the required contiguity flags from self.mode ('c' or
 * 'fortran'), raises ValueError if the caller's `flags` do not request
 * a compatible contiguous buffer, then fills `info` from the array's
 * cached data pointer, len, ndim, shape, strides and itemsize.
 * suboffsets is always NULL and the buffer is exported writable
 * (readonly = 0).  On success info->obj owns a reference to self; on
 * the error path info->obj is cleared.  Error handling follows the
 * generated-code convention: failing calls jump to __pyx_L1_error,
 * which records a traceback and returns -1. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-set info->obj to None so the error path can distinguish
 * "never filled" from "filled with self". */
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":180
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * cdef int bufmode = -1 # <<<<<<<<<<<<<<
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
__pyx_v_bufmode = -1;
/* "View.MemoryView":181
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * cdef int bufmode = -1
 * if self.mode == u"c": # <<<<<<<<<<<<<<
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran":
 */
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":182
 * cdef int bufmode = -1
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
goto __pyx_L3;
}
/* "View.MemoryView":183
 * if self.mode == u"c":
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran": # <<<<<<<<<<<<<<
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode):
 */
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":184
 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
 * if not (flags & bufmode):
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 */
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":185
 * elif self.mode == u"fortran":
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode): # <<<<<<<<<<<<<<
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data
 */
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":186
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode):
 * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
 * info.buf = self.data
 * info.len = self.len
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* Contiguity accepted: copy the array's cached geometry into the
 * Py_buffer the caller supplied. */
/* "View.MemoryView":187
 * if not (flags & bufmode):
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data # <<<<<<<<<<<<<<
 * info.len = self.len
 * info.ndim = self.ndim
 */
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":188
 * raise ValueError("Can only create a buffer that is contiguous in memory.")
 * info.buf = self.data
 * info.len = self.len # <<<<<<<<<<<<<<
 * info.ndim = self.ndim
 * info.shape = self._shape
 */
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":189
 * info.buf = self.data
 * info.len = self.len
 * info.ndim = self.ndim # <<<<<<<<<<<<<<
 * info.shape = self._shape
 * info.strides = self._strides
 */
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":190
 * info.len = self.len
 * info.ndim = self.ndim
 * info.shape = self._shape # <<<<<<<<<<<<<<
 * info.strides = self._strides
 * info.suboffsets = NULL
 */
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":191
 * info.ndim = self.ndim
 * info.shape = self._shape
 * info.strides = self._strides # <<<<<<<<<<<<<<
 * info.suboffsets = NULL
 * info.itemsize = self.itemsize
 */
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":192
 * info.shape = self._shape
 * info.strides = self._strides
 * info.suboffsets = NULL # <<<<<<<<<<<<<<
 * info.itemsize = self.itemsize
 * info.readonly = 0
 */
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":193
 * info.strides = self._strides
 * info.suboffsets = NULL
 * info.itemsize = self.itemsize # <<<<<<<<<<<<<<
 * info.readonly = 0
 *
 */
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":194
 * info.suboffsets = NULL
 * info.itemsize = self.itemsize
 * info.readonly = 0 # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_FORMAT:
 */
__pyx_v_info->readonly = 0;
/* "View.MemoryView":196
 * info.readonly = 0
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * info.format = self.format
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":197
 *
 * if flags & PyBUF_FORMAT:
 * info.format = self.format # <<<<<<<<<<<<<<
 * else:
 * info.format = NULL
 */
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":199
 * info.format = self.format
 * else:
 * info.format = NULL # <<<<<<<<<<<<<<
 *
 * info.obj = self
 */
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* info->obj takes a new strong reference to self, replacing the
 * provisional None stored at entry. */
/* "View.MemoryView":201
 * info.format = NULL
 *
 * info.obj = self # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
 */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":179
 *
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
 * cdef int bufmode = -1
 * if self.mode == u"c":
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
/* Error path: drop whatever reference info->obj holds so the caller
 * never sees a half-initialized buffer. */
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
/* Success path: release the provisional None only if obj was never
 * replaced by self (cannot happen here, but the pattern is generic). */
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":205
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
/* tp_dealloc-style wrapper for the Cython `array` type: casts self to
 * the concrete struct and forwards to the __dealloc__ implementation. */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Implementation of array.__dealloc__.  Release order:
 *   1. if a user-supplied callback_free_data is set, it alone frees data;
 *   2. otherwise, if this array owns its buffer (free_data), object-typed
 *      elements are DECREF'd via refcount_objects_in_slice before the
 *      raw buffer is free()d;
 *   3. the shape/strides block (PyMem_Malloc'd in __cinit__, see
 *      View.MemoryView:138) is released unconditionally with PyMem_Free. */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":206
 *
 * def __dealloc__(array self):
 * if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
 * self.callback_free_data(self.data)
 * elif self.free_data:
 */
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":207
 * def __dealloc__(array self):
 * if self.callback_free_data != NULL:
 * self.callback_free_data(self.data) # <<<<<<<<<<<<<<
 * elif self.free_data:
 * if self.dtype_is_object:
 */
__pyx_v_self->callback_free_data(__pyx_v_self->data);
goto __pyx_L3;
}
/* "View.MemoryView":208
 * if self.callback_free_data != NULL:
 * self.callback_free_data(self.data)
 * elif self.free_data: # <<<<<<<<<<<<<<
 * if self.dtype_is_object:
 * refcount_objects_in_slice(self.data, self._shape,
 */
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":209
 * self.callback_free_data(self.data)
 * elif self.free_data:
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * refcount_objects_in_slice(self.data, self._shape,
 * self._strides, self.ndim, False)
 */
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":210
 * elif self.free_data:
 * if self.dtype_is_object:
 * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
 * self._strides, self.ndim, False)
 * free(self.data)
 */
/* final argument False => decrement refcounts of the stored objects */
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
goto __pyx_L4;
}
__pyx_L4:;
/* "View.MemoryView":212
 * refcount_objects_in_slice(self.data, self._shape,
 * self._strides, self.ndim, False)
 * free(self.data) # <<<<<<<<<<<<<<
 * PyMem_Free(self._shape)
 *
 */
free(__pyx_v_self->data);
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":213
 * self._strides, self.ndim, False)
 * free(self.data)
 * PyMem_Free(self._shape) # <<<<<<<<<<<<<<
 *
 * property memview:
 */
PyMem_Free(__pyx_v_self->_shape);
/* "View.MemoryView":205
 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
 *
 * def __dealloc__(array self): # <<<<<<<<<<<<<<
 * if self.callback_free_data != NULL:
 * self.callback_free_data(self.data)
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":217
* property memview:
* @cname('get_memview')
* def __get__(self): # <<<<<<<<<<<<<<
*
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
*/
/* Python wrapper */
/* Getter wrapper for the `memview` property of the array type (cname
 * forced to `get_memview` by @cname in the Cython source): forwards to
 * the property implementation below. */
static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/
static PyObject *get_memview(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of the array.memview property getter: constructs and
 * returns a new memoryview over self, requesting an any-contiguous,
 * writable buffer that carries format information
 * (PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), and passing
 * along self.dtype_is_object.  Returns NULL on error. */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":219
 * def __get__(self):
 *
 * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
 * return memoryview(self, flags, self.dtype_is_object)
 *
 */
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":220
 *
 * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
 * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
/* Build the 3-tuple (self, flags, dtype_is_object) and call the
 * module's memoryview type with it. */
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":217
 * property memview:
 * @cname('get_memview')
 * def __get__(self): # <<<<<<<<<<<<<<
 *
 * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":223
*
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
/* tp_getattro-style wrapper for array.__getattr__: casts self and
 * forwards to the implementation below. */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__getattr__: delegates any attribute lookup
 * that was not found on the array itself to self.memview (fetching the
 * memview property first, then getattr-ing `attr` on it).  Returns a
 * new reference, or NULL on error. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":224
 *
 * def __getattr__(self, attr):
 * return getattr(self.memview, attr) # <<<<<<<<<<<<<<
 *
 * def __getitem__(self, item):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":223
 *
 *
 * def __getattr__(self, attr): # <<<<<<<<<<<<<<
 * return getattr(self.memview, attr)
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
/* mp_subscript-style wrapper for array.__getitem__: casts self and
 * forwards to the implementation below. */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__getitem__: indexing is delegated entirely
 * to the memoryview — equivalent to `return self.memview[item]`.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":227
 *
 * def __getitem__(self, item):
 * return self.memview[item] # <<<<<<<<<<<<<<
 *
 * def __setitem__(self, item, value):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
 * return getattr(self.memview, attr)
 *
 * def __getitem__(self, item): # <<<<<<<<<<<<<<
 * return self.memview[item]
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":229
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
/* mp_ass_subscript-style wrapper for array.__setitem__: casts self and
 * forwards to the implementation below.  Returns 0 on success, -1 on
 * error. */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of array.__setitem__: item assignment is delegated to
 * the memoryview — equivalent to `self.memview[item] = value`.
 * Returns 0 on success, -1 on error. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":230
 *
 * def __setitem__(self, item, value):
 * self.memview[item] = value # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":229
 * return self.memview[item]
 *
 * def __setitem__(self, item, value): # <<<<<<<<<<<<<<
 * self.memview[item] = value
 *
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* array_cwrapper (cname "__pyx_array_new"), from View.MemoryView:234.
 * C-level factory for the Cython `array` helper class.
 *   shape    - tuple of dimension extents (borrowed ref)
 *   itemsize - element size in bytes
 *   format   - struct-style format string (C string, copied into a bytes obj)
 *   mode     - memory layout mode, decoded as ASCII ("c" / "fortran")
 *   buf      - if NULL, the array allocates its own buffer; otherwise the
 *              array is built with allocate_buffer=False and its data
 *              pointer is set to `buf` (caller keeps ownership of `buf`).
 * Returns a new reference, or 0 with a Python exception set on failure. */
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
  struct __pyx_array_obj *__pyx_v_result = 0;
  struct __pyx_array_obj *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("array_cwrapper", 0);
  /* View.MemoryView:238 -- if buf == NULL: */
  __pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
  if (__pyx_t_1) {
    /* View.MemoryView:239 -- result = array(shape, itemsize, format, mode.decode('ASCII')) */
    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    /* Pack the 4 positional args; PyTuple_SET_ITEM steals the references. */
    __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_INCREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
    __Pyx_GIVEREF(__pyx_t_2);
    PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_3);
    PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    __pyx_t_2 = 0;
    __pyx_t_3 = 0;
    __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
    __pyx_t_4 = 0;
    goto __pyx_L3;
  }
  /*else*/ {
    /* View.MemoryView:241-243 -- result = array(..., allocate_buffer=False);
     * result.data = buf */
    __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_INCREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_5);
    PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_3);
    __pyx_t_4 = 0;
    __pyx_t_5 = 0;
    __pyx_t_3 = 0;
    /* kwargs dict carrying allocate_buffer=False */
    __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
    __pyx_t_5 = 0;
    /* Point the freshly created array at the caller-supplied buffer. */
    __pyx_v_result->data = __pyx_v_buf;
  }
  __pyx_L3:;
  /* View.MemoryView:245 -- return result */
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_result));
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* View.MemoryView:271 -- Enum.__init__(self, name)  (Python wrapper).
 * Unpacks exactly one argument (`name`, positional or keyword) and forwards
 * to the implementation function.  Returns 0 on success, -1 on bad args. */
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_name = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
    PyObject* values[1] = {0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Scatter positional args into `values`; fallthrough is intentional. */
      switch (pos_args) {
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); /* fall through */
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Fill any slot not covered positionally from the keyword dict. */
      switch (pos_args) {
        case 0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
      }
      if (unlikely(kw_args > 0)) {
        /* leftover keywords are unexpected -> TypeError */
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
    }
    __pyx_v_name = values[0];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Enum.__init__ implementation (View.MemoryView:272): stores `name` on the
 * instance (`self.name = name`), swapping out the previous reference.
 * Always returns 0; cannot fail. */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__", 0);
  /* self.name = name -- incref new value first, then release the old one */
  __Pyx_INCREF(__pyx_v_name);
  __Pyx_GIVEREF(__pyx_v_name);
  __Pyx_GOTREF(__pyx_v_self->name);
  __Pyx_DECREF(__pyx_v_self->name);
  __pyx_v_self->name = __pyx_v_name;
  /* function exit code */
  __pyx_r = 0;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* View.MemoryView:273 -- Enum.__repr__(self)  (Python wrapper).
 * Thin trampoline: casts `self` and delegates to the implementation. */
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Enum.__repr__ implementation (View.MemoryView:274): returns a new
 * reference to `self.name`.  Cannot fail. */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__", 0);
  /* return self.name -- incref because the caller receives a new ref */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->name);
  __pyx_r = __pyx_v_self->name;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* View.MemoryView:288 -- align_pointer(memory, alignment) (nogil).
 * Rounds the address `memory` up to the next multiple of `alignment`
 * (a no-op when it is already aligned) and returns the adjusted pointer.
 * Callers pass a power-of-two alignment in practice, but the arithmetic
 * here works for any non-zero alignment. */
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
  Py_intptr_t addr = ((Py_intptr_t)__pyx_v_memory);
  size_t remainder = (size_t)(addr % __pyx_v_alignment);
  /* Bump past the misaligned tail, if any. */
  if (remainder > 0) {
    addr += (Py_intptr_t)(__pyx_v_alignment - remainder);
  }
  return ((void *)addr);
}
/* View.MemoryView:317 -- memoryview.__cinit__(self, obj, flags,
 * dtype_is_object=False)  (Python wrapper).
 * Parses 2 required args (`obj`, `flags`) plus optional `dtype_is_object`
 * from the arg tuple / keyword dict, then forwards to the implementation.
 * Returns 0 on success, -1 with an exception set on argument errors. */
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_obj = 0;
  int __pyx_v_flags;
  int __pyx_v_dtype_is_object;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
    PyObject* values[3] = {0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Scatter positional args; case fallthrough is intentional. */
      switch (pos_args) {
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); /* fall through */
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); /* fall through */
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); /* fall through */
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Fill remaining slots from keywords; fallthrough is intentional. */
      switch (pos_args) {
        case 0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        /* fall through */
        case 1:
        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        /* fall through */
        case 2:
        if (kw_args > 0) {
          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object);
          if (value) { values[2] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        /* any remaining keywords are unexpected -> TypeError */
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else {
      /* No keywords: purely positional, 2 or 3 args accepted. */
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); /* fall through */
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_obj = values[0];
    __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    if (values[2]) {
      __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    } else {
      /* default: dtype_is_object=False */
      __pyx_v_dtype_is_object = ((int)0);
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* memoryview.__cinit__ implementation (View.MemoryView:318-337).
 * Stores `obj` and `flags`, acquires a Py_buffer view of `obj` (unless a
 * subclass was constructed with obj=None), allocates the access lock,
 * determines whether elements are Python objects (format 'O'), and sets up
 * the aligned acquisition-count pointer.  Returns 0 on success, -1 with an
 * exception set on failure (buffer acquisition, MemoryError, ...). */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__cinit__", 0);
  /* View.MemoryView:318 -- self.obj = obj (swap references) */
  __Pyx_INCREF(__pyx_v_obj);
  __Pyx_GIVEREF(__pyx_v_obj);
  __Pyx_GOTREF(__pyx_v_self->obj);
  __Pyx_DECREF(__pyx_v_self->obj);
  __pyx_v_self->obj = __pyx_v_obj;
  /* View.MemoryView:319 -- self.flags = flags */
  __pyx_v_self->flags = __pyx_v_flags;
  /* View.MemoryView:320 -- if type(self) is memoryview or obj is not None:
   * (short-circuit OR evaluated in two steps) */
  __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)((PyObject *)__pyx_memoryview_type)));
  __pyx_t_3 = (__pyx_t_2 != 0);
  if (!__pyx_t_3) {
  } else {
    __pyx_t_1 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_3 = (__pyx_v_obj != Py_None);
  __pyx_t_2 = (__pyx_t_3 != 0);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_1) {
    /* View.MemoryView:321 -- __Pyx_GetBuffer(obj, &self.view, flags) */
    __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 321; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    /* View.MemoryView:322-324 -- exporters may leave view.obj NULL; patch
     * it to Py_None (with its own reference) so releasing is uniform. */
    __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
    if (__pyx_t_1) {
      ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
      Py_INCREF(Py_None);
      goto __pyx_L6;
    }
    __pyx_L6:;
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* View.MemoryView:326-328 -- self.lock = PyThread_allocate_lock();
   * raise MemoryError if allocation failed */
  __pyx_v_self->lock = PyThread_allocate_lock();
  __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
  if (__pyx_t_1) {
    PyErr_NoMemory(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  /* View.MemoryView:330-333 -- elements are Python objects iff the buffer
   * format is b'O' (only trustworthy when PyBUF_FORMAT was requested). */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
  if (__pyx_t_1) {
    /* self.dtype_is_object = self.view.format == b'O' */
    __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_v_self->dtype_is_object = __pyx_t_1;
    goto __pyx_L8;
  }
  /*else*/ {
    /* no format info requested: trust the caller-provided flag */
    __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
  }
  __pyx_L8:;
  /* View.MemoryView:335-336 -- align the acquisition counter so atomic
   * operations on it are safe on platforms requiring natural alignment. */
  __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
  /* View.MemoryView:337 -- self.typeinfo = NULL */
  __pyx_v_self->typeinfo = NULL;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* View.MemoryView:339 -- memoryview.__dealloc__(self)  (Python wrapper).
 * Thin trampoline to the implementation; __dealloc__ cannot fail. */
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* memoryview.__dealloc__ implementation (View.MemoryView:340-344).
 * Releases the Py_buffer view when one was acquired (self.obj is not None,
 * mirroring the acquisition condition in __cinit__), then frees the thread
 * lock if it was allocated. */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  __Pyx_RefNannySetupContext("__dealloc__", 0);
  /* View.MemoryView:340-341 -- if self.obj is not None: release the view */
  __pyx_t_1 = (__pyx_v_self->obj != Py_None);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    __Pyx_ReleaseBuffer((&__pyx_v_self->view));
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* View.MemoryView:343-344 -- free the lock if __cinit__ allocated one */
  __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
  if (__pyx_t_2) {
    PyThread_free_lock(__pyx_v_self->lock);
    goto __pyx_L4;
  }
  __pyx_L4:;
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* View.MemoryView:346 -- cdef char *get_item_pointer(self, index) except NULL.
 * Walks `index` (any iterable of per-dimension integer indices), applying
 * pybuffer_index once per dimension to advance `itemp` from the buffer base
 * (self.view.buf) to the addressed element.  Returns the element pointer,
 * or NULL with an exception set (bad index, wrong iterable, ...). */
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
  Py_ssize_t __pyx_v_dim;
  char *__pyx_v_itemp;
  PyObject *__pyx_v_idx = NULL;
  char *__pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  Py_ssize_t __pyx_t_3;
  PyObject *(*__pyx_t_4)(PyObject *);
  PyObject *__pyx_t_5 = NULL;
  Py_ssize_t __pyx_t_6;
  char *__pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_item_pointer", 0);
  /* View.MemoryView:348 -- itemp = <char *> self.view.buf */
  __pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
  /* View.MemoryView:350 -- for dim, idx in enumerate(index):
   * Fast path indexes lists/tuples directly; otherwise fall back to the
   * generic iterator protocol (tp_iternext). */
  __pyx_t_1 = 0;
  if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
    __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
    __pyx_t_4 = NULL;
  } else {
    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  for (;;) {
    if (likely(!__pyx_t_4)) {
      /* direct list/tuple indexing path */
      if (likely(PyList_CheckExact(__pyx_t_2))) {
        if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
        #if CYTHON_COMPILING_IN_CPYTHON
        __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #else
        __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #endif
      } else {
        if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
        #if CYTHON_COMPILING_IN_CPYTHON
        __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #else
        __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #endif
      }
    } else {
      /* generic iterator path: StopIteration ends the loop cleanly */
      __pyx_t_5 = __pyx_t_4(__pyx_t_2);
      if (unlikely(!__pyx_t_5)) {
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
          else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        }
        break;
      }
      __Pyx_GOTREF(__pyx_t_5);
    }
    __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
    __pyx_t_5 = 0;
    /* enumerate() counter -> dim */
    __pyx_v_dim = __pyx_t_1;
    __pyx_t_1 = (__pyx_t_1 + 1);
    /* View.MemoryView:351 -- itemp = pybuffer_index(&self.view, itemp, idx, dim) */
    __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_v_itemp = __pyx_t_7;
  }
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* View.MemoryView:353 -- return itemp */
  __pyx_r = __pyx_v_itemp;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_idx);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* View.MemoryView:356 -- memoryview.__getitem__(self, index)
 * (Python wrapper).  Thin trampoline to the implementation function. */
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":357
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":358
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
}
/* "View.MemoryView":360
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":363
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 363; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_2) {
/* "View.MemoryView":364
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":366
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":367
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":356
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":369
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* have_slices, index = _unellipsify(index, self.view.ndim)
*
*/
/* Python-level slot wrapper for memoryview.__setitem__.
 * Casts the generic PyObject *self to the concrete memoryview struct and
 * delegates all work to the Cython-generated implementation below.
 * Returns 0 on success, -1 on error (propagated from the impl). */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  int result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  result = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(
      ((struct __pyx_memoryview_obj *)__pyx_v_self),
      ((PyObject *)__pyx_v_index),
      ((PyObject *)__pyx_v_value));
  __Pyx_RefNannyFinishContext();
  return result;
}
/* Cython-generated implementation of memoryview.__setitem__.
 * Normalizes `index` through _unellipsify(), then dispatches:
 *   - slice indices: if `value` coerces to a memoryview (is_slice), do a
 *     slice-to-slice copy (setitem_slice_assignment); otherwise broadcast a
 *     scalar over the destination slice (setitem_slice_assign_scalar);
 *   - plain integer indices: write one element (setitem_indexed).
 * Returns 0 on success, -1 on error. Do not reorder statements: the
 * refcount (INCREF/DECREF/GOTREF) and goto-label error paths are exact. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* `index` is rebound below (DECREF_SET), so own a reference first. */
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":370
 *
 * def __setitem__(memoryview self, object index, object value):
 * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
 *
 * if have_slices:
 */
__pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
/* Unpack the (have_slices, index) 2-tuple returned by _unellipsify. */
if (likely(__pyx_t_1 != Py_None)) {
PyObject* sequence = __pyx_t_1;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
#else
__pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_have_slices = __pyx_t_2;
__pyx_t_2 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":372
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 * if have_slices: # <<<<<<<<<<<<<<
 * obj = self.is_slice(value)
 * if obj:
 */
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":373
 *
 * if have_slices:
 * obj = self.is_slice(value) # <<<<<<<<<<<<<<
 * if obj:
 * self.setitem_slice_assignment(self[index], obj)
 */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_obj = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":374
 * if have_slices:
 * obj = self.is_slice(value)
 * if obj: # <<<<<<<<<<<<<<
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 */
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":375
 * obj = self.is_slice(value)
 * if obj:
 * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
 * else:
 * self.setitem_slice_assign_scalar(self[index], value)
 */
__pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L4;
}
/*else*/ {
/* "View.MemoryView":377
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
 * else:
 * self.setitem_indexed(index, value)
 */
__pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_3);
/* self[index] must itself be a memoryview before the scalar broadcast. */
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L4:;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":379
 * self.setitem_slice_assign_scalar(self[index], value)
 * else:
 * self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
 *
 * cdef is_slice(self, obj):
 */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L3:;
/* "View.MemoryView":369
 * return self.convert_item_to_object(itemp)
 *
 * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":381
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* Cython-generated `memoryview.is_slice(obj)`.
 * If `obj` is not already a memoryview, attempt to wrap it in one
 * (with self.flags | PyBUF_ANY_CONTIGUOUS and self.dtype_is_object).
 * On TypeError during that coercion, return None; otherwise return the
 * (possibly wrapped) object. The exception save/reset block below is the
 * generated try/except machinery — statement order is significant. */
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
/* `obj` may be rebound (DECREF_SET) below, so own a reference first. */
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":382
 *
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 */
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, ((PyObject *)__pyx_memoryview_type));
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview):
 * try: # <<<<<<<<<<<<<<
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 */
{
/* Save the current exception state (t_3..t_5) so it can be restored on
 * except-return or except-error; generated try-block entry. */
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":384
 * if not isinstance(obj, memoryview):
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
 * self.dtype_is_object)
 * except TypeError:
 */
__pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":385
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object) # <<<<<<<<<<<<<<
 * except TypeError:
 * return None
 */
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":384
 * if not isinstance(obj, memoryview):
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
 * self.dtype_is_object)
 * except TypeError:
 */
/* Build the 3-arg tuple (obj, flags, dtype_is_object) and call the
 * memoryview constructor. */
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L11_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":386
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 * except TypeError: # <<<<<<<<<<<<<<
 * return None
 *
 */
__pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":387
 * self.dtype_is_object)
 * except TypeError:
 * return None # <<<<<<<<<<<<<<
 *
 * return obj
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
/* Any non-TypeError exception propagates. */
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L11_try_end:;
}
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":389
 * return None
 *
 * return obj # <<<<<<<<<<<<<<
 *
 * cdef setitem_slice_assignment(self, dst, src):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":381
 * self.setitem_indexed(index, value)
 *
 * cdef is_slice(self, obj): # <<<<<<<<<<<<<<
 * if not isinstance(obj, memoryview):
 * try:
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":391
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* Cython-generated `memoryview.setitem_slice_assignment(dst, src)`.
 * Extracts __Pyx_memviewslice views from both memoryview objects and
 * copies src's contents into dst via memoryview_copy_contents, passing
 * src.ndim, dst.ndim and self.dtype_is_object. Returns Py_None on
 * success, NULL on error. */
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":395
 * cdef __Pyx_memviewslice src_slice
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object)
 */
/* Type-check: src and dst must each be a memoryview (or None). */
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":396
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
 * src.ndim, dst.ndim, self.dtype_is_object)
 *
 */
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":397
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
 *
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 */
/* Fetch src.ndim and dst.ndim as C ints (attribute lookup + int coercion). */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":395
 * cdef __Pyx_memviewslice src_slice
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object)
 */
__pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":391
 * return obj
 *
 * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice dst_slice
 * cdef __Pyx_memviewslice src_slice
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":399
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* Cython-generated `memoryview.setitem_slice_assign_scalar(dst, value)`.
 * Broadcasts one scalar `value` over every element of the destination
 * slice `dst`. The scalar is first packed into a temporary item buffer:
 * a 128-int stack array when self.view.itemsize fits, otherwise a
 * PyMem_Malloc'd buffer (freed in the generated try/finally below).
 * Returns Py_None on success, NULL on error. The exception-swap /
 * ErrFetch machinery implements the `finally: PyMem_Free(tmp)` clause —
 * statement order is significant. */
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[128];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
int __pyx_t_4;
char const *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":401
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 * cdef int array[128]
 * cdef void *tmp = NULL # <<<<<<<<<<<<<<
 * cdef void *item
 *
 */
__pyx_v_tmp = NULL;
/* "View.MemoryView":406
 * cdef __Pyx_memviewslice *dst_slice
 * cdef __Pyx_memviewslice tmp_slice
 * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
 *
 * if <size_t>self.view.itemsize > sizeof(array):
 */
__pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice));
/* "View.MemoryView":408
 * dst_slice = get_slice_from_memview(dst, &tmp_slice)
 *
 * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL:
 */
__pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":409
 *
 * if <size_t>self.view.itemsize > sizeof(array):
 * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
 * if tmp == NULL:
 * raise MemoryError
 */
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":410
 * if <size_t>self.view.itemsize > sizeof(array):
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL: # <<<<<<<<<<<<<<
 * raise MemoryError
 * item = tmp
 */
__pyx_t_1 = ((__pyx_v_tmp == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":411
 * tmp = PyMem_Malloc(self.view.itemsize)
 * if tmp == NULL:
 * raise MemoryError # <<<<<<<<<<<<<<
 * item = tmp
 * else:
 */
PyErr_NoMemory(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":412
 * if tmp == NULL:
 * raise MemoryError
 * item = tmp # <<<<<<<<<<<<<<
 * else:
 * item = <void *> array
 */
__pyx_v_item = __pyx_v_tmp;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":414
 * item = tmp
 * else:
 * item = <void *> array # <<<<<<<<<<<<<<
 *
 * try:
 */
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":416
 * item = <void *> array
 *
 * try: # <<<<<<<<<<<<<<
 * if self.dtype_is_object:
 * (<PyObject **> item)[0] = <PyObject *> value
 */
/*try:*/ {
/* "View.MemoryView":417
 *
 * try:
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * (<PyObject **> item)[0] = <PyObject *> value
 * else:
 */
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":418
 * try:
 * if self.dtype_is_object:
 * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
 * else:
 * self.assign_item_from_object(<char *> item, value)
 */
/* Object dtype: store the borrowed PyObject pointer directly. */
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
goto __pyx_L8;
}
/*else*/ {
/* "View.MemoryView":420
 * (<PyObject **> item)[0] = <PyObject *> value
 * else:
 * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L6_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L8:;
/* "View.MemoryView":424
 *
 *
 * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
__pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":425
 *
 * if self.view.suboffsets != NULL:
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 * item, self.dtype_is_object)
 */
__pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 425; __pyx_clineno = __LINE__; goto __pyx_L6_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
goto __pyx_L9;
}
__pyx_L9:;
/* "View.MemoryView":426
 * if self.view.suboffsets != NULL:
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
 * item, self.dtype_is_object)
 * finally:
 */
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":429
 * item, self.dtype_is_object)
 * finally:
 * PyMem_Free(tmp) # <<<<<<<<<<<<<<
 *
 * cdef setitem_indexed(self, index, value):
 */
/*finally:*/ {
/*normal exit:*/{
/* PyMem_Free(NULL) is a no-op, so this is safe on the stack-array path. */
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
/*exception exit:*/{
__pyx_L6_error:;
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
/* Stash the in-flight exception (and saved lineno/filename) so the
 * finally body can run, then re-raise it unchanged afterwards. */
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_6);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11);
}
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8);
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":399
 * src.ndim, dst.ndim, self.dtype_is_object)
 *
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
 * cdef int array[128]
 * cdef void *tmp = NULL
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":431
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* Cython-generated `memoryview.setitem_indexed(index, value)`:
 *   itemp = self.get_item_pointer(index)
 *   self.assign_item_from_object(itemp, value)
 * Resolves the element's raw address via the vtable, then converts and
 * stores `value` there. Returns Py_None on success, NULL on error. */
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  char *elem_ptr;
  PyObject *call_result = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("setitem_indexed", 0);

  /* cdef char *itemp = self.get_item_pointer(index) */
  elem_ptr = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index);
  if (unlikely(elem_ptr == NULL)) {
    __pyx_filename = __pyx_f[2]; __pyx_lineno = 432; __pyx_clineno = __LINE__;
    goto __pyx_L1_error;
  }

  /* self.assign_item_from_object(itemp, value) — result is discarded. */
  call_result = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, elem_ptr, __pyx_v_value);
  if (unlikely(!call_result)) {
    __pyx_filename = __pyx_f[2]; __pyx_lineno = 433; __pyx_clineno = __LINE__;
    goto __pyx_L1_error;
  }
  __Pyx_GOTREF(call_result);
  __Pyx_DECREF(call_result); call_result = 0;

  /* Success: return None. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;

__pyx_L1_error:;
  __Pyx_XDECREF(call_result);
  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
__pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":435
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":438
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 438; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":441
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":442
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":443
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = NULL;
}
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
}
/*else:*/ {
/* "View.MemoryView":447
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":448
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 448; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;};
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
}
/* "View.MemoryView":449
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":444
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_12 = PyErr_ExceptionMatches(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__pyx_t_12) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_9);
/* "View.MemoryView":445
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":435
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":451
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
Py_ssize_t __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
char *__pyx_t_10;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":454
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":459
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":460
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":462
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = NULL;
}
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":464
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_7 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_9 = __pyx_v_bytesvalue;
__pyx_t_11 = PyBytes_AS_STRING(__pyx_t_9);
__pyx_t_12 = (__pyx_t_11 + PyBytes_GET_SIZE(__pyx_t_9));
for (__pyx_t_13 = __pyx_t_11; __pyx_t_13 < __pyx_t_12; __pyx_t_13++) {
__pyx_t_10 = __pyx_t_13;
__pyx_v_c = (__pyx_t_10[0]);
/* "View.MemoryView":465
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_7;
/* "View.MemoryView":464
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_7 = (__pyx_t_7 + 1);
/* "View.MemoryView":465
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":451
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":468
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
char *__pyx_t_3;
void *__pyx_t_4;
int __pyx_t_5;
Py_ssize_t __pyx_t_6;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":469
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":470
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_2 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_2;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":472
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
__pyx_v_info->shape = NULL;
}
__pyx_L3:;
/* "View.MemoryView":474
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":475
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_2 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_2;
goto __pyx_L4;
}
/*else*/ {
/* "View.MemoryView":477
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
__pyx_v_info->strides = NULL;
}
__pyx_L4:;
/* "View.MemoryView":479
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":480
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_2 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_2;
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":482
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->suboffsets = NULL;
}
__pyx_L5:;
/* "View.MemoryView":484
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":485
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_3 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_3;
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":487
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
__pyx_v_info->format = NULL;
}
__pyx_L6:;
/* "View.MemoryView":489
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_4 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":490
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_5 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_5;
/* "View.MemoryView":491
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = 0
*/
__pyx_t_6 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_6;
/* "View.MemoryView":492
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = 0
* info.obj = self
*/
__pyx_t_6 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_6;
/* "View.MemoryView":493
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = 0 # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":494
* info.len = self.view.len
* info.readonly = 0
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":468
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape
*/
/* function exit code */
__pyx_r = 0;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":501
* property T:
* @cname('__pyx_memoryview_transpose')
* def __get__(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":502
* @cname('__pyx_memoryview_transpose')
* def __get__(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":503
* def __get__(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":504
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* property base:
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":501
* property T:
* @cname('__pyx_memoryview_transpose')
* def __get__(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":508
* property base:
* @cname('__pyx_memoryview__get__base')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":509
* @cname('__pyx_memoryview__get__base')
* def __get__(self):
* return self.obj # <<<<<<<<<<<<<<
*
* property shape:
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":508
* property base:
* @cname('__pyx_memoryview__get__base')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":513
* property shape:
* @cname('__pyx_memoryview_get_shape')
* def __get__(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":514
* @cname('__pyx_memoryview_get_shape')
* def __get__(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* property strides:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":513
* property shape:
* @cname('__pyx_memoryview_get_shape')
* def __get__(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":518
* property strides:
* @cname('__pyx_memoryview_get_strides')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":519
* @cname('__pyx_memoryview_get_strides')
* def __get__(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":521
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":523
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* property suboffsets:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":518
* property strides:
* @cname('__pyx_memoryview_get_strides')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":527
* property suboffsets:
* @cname('__pyx_memoryview_get_suboffsets')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
/* Cython-generated getter for memoryview.suboffsets.
 * If the underlying Py_buffer exposes no suboffsets (view.suboffsets == NULL),
 * returns the tuple (-1,) * ndim (per PEP 3118, -1 marks "no suboffset");
 * otherwise returns a tuple of the first view.ndim suboffset values.
 * NOTE(review): auto-generated code — do not hand-edit; regenerate from the
 * .pyx source instead. */
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Thin Python-level wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_v_suboffset;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  Py_ssize_t *__pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  Py_ssize_t *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":528
 * @cname('__pyx_memoryview_get_suboffsets')
 * def __get__(self):
 * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
 * return (-1,) * self.view.ndim
 *
 */
  __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":529
 * def __get__(self):
 * if self.view.suboffsets == NULL:
 * return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
 *
 * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
 */
    /* No suboffsets: build (-1,) * ndim.  __pyx_tuple__20 is the cached
     * module-level constant tuple (-1,). */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__20, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;
  }
  /* "View.MemoryView":531
 * return (-1,) * self.view.ndim
 *
 * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
 *
 * property ndim:
 */
  /* List-comprehension over the C array view.suboffsets[0..ndim), then
   * converted to a tuple. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
  for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
    __pyx_t_4 = __pyx_t_6;
    __pyx_v_suboffset = (__pyx_t_4[0]);
    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  }
  __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":527
 * property suboffsets:
 * @cname('__pyx_memoryview_get_suboffsets')
 * def __get__(self): # <<<<<<<<<<<<<<
 * if self.view.suboffsets == NULL:
 * return (-1,) * self.view.ndim
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":535
* property ndim:
* @cname('__pyx_memoryview_get_ndim')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
/* Cython-generated getter for memoryview.ndim: returns view.ndim as a
 * Python int.  NOTE(review): auto-generated code — do not hand-edit. */
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Thin wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":536
 * @cname('__pyx_memoryview_get_ndim')
 * def __get__(self):
 * return self.view.ndim # <<<<<<<<<<<<<<
 *
 * property itemsize:
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 536; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":535
 * property ndim:
 * @cname('__pyx_memoryview_get_ndim')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.view.ndim
 *
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":540
* property itemsize:
* @cname('__pyx_memoryview_get_itemsize')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
/* Cython-generated getter for memoryview.itemsize: returns view.itemsize
 * (a Py_ssize_t) as a Python int.  NOTE(review): auto-generated — do not
 * hand-edit. */
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Thin wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":541
 * @cname('__pyx_memoryview_get_itemsize')
 * def __get__(self):
 * return self.view.itemsize # <<<<<<<<<<<<<<
 *
 * property nbytes:
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 541; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":540
 * property itemsize:
 * @cname('__pyx_memoryview_get_itemsize')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.view.itemsize
 *
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":545
* property nbytes:
* @cname('__pyx_memoryview_get_nbytes')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
/* Cython-generated getter for memoryview.nbytes: computes
 * self.size * self.view.itemsize via Python-object arithmetic (self.size is
 * itself a Python attribute lookup, so the cached _size object is reused).
 * NOTE(review): auto-generated — do not hand-edit. */
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Thin wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":546
 * @cname('__pyx_memoryview_get_nbytes')
 * def __get__(self):
 * return self.size * self.view.itemsize # <<<<<<<<<<<<<<
 *
 * property size:
 */
  __Pyx_XDECREF(__pyx_r);
  /* Attribute lookup of "size" goes through the size property getter. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":545
 * property nbytes:
 * @cname('__pyx_memoryview_get_nbytes')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.size * self.view.itemsize
 *
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":550
* property size:
* @cname('__pyx_memoryview_get_size')
* def __get__(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
/* Cython-generated getter for memoryview.size: the product of
 * view.shape[0..ndim).  Computed lazily with Python-int arithmetic (so it
 * cannot overflow) and cached in self->_size; subsequent calls return the
 * cached object.  NOTE(review): auto-generated — do not hand-edit. */
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* Thin wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_v_result = NULL;
  PyObject *__pyx_v_length = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":551
 * @cname('__pyx_memoryview_get_size')
 * def __get__(self):
 * if self._size is None: # <<<<<<<<<<<<<<
 * result = 1
 *
 */
  /* Cache miss: _size is still None, so compute the product. */
  __pyx_t_1 = (__pyx_v_self->_size == Py_None);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":552
 * def __get__(self):
 * if self._size is None:
 * result = 1 # <<<<<<<<<<<<<<
 *
 * for length in self.view.shape[:self.view.ndim]:
 */
    __Pyx_INCREF(__pyx_int_1);
    __pyx_v_result = __pyx_int_1;
    /* "View.MemoryView":554
 * result = 1
 *
 * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
 * result *= length
 *
 */
    /* Iterate the C array view.shape[0..ndim), multiplying into result. */
    __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
    for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
      __pyx_t_3 = __pyx_t_5;
      __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
      __pyx_t_6 = 0;
      /* "View.MemoryView":555
 *
 * for length in self.view.shape[:self.view.ndim]:
 * result *= length # <<<<<<<<<<<<<<
 *
 * self._size = result
 */
      __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 555; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
      __pyx_t_6 = 0;
    }
    /* "View.MemoryView":557
 * result *= length
 *
 * self._size = result # <<<<<<<<<<<<<<
 *
 * return self._size
 */
    /* Store into the cache attribute, releasing the previous value (None). */
    __Pyx_INCREF(__pyx_v_result);
    __Pyx_GIVEREF(__pyx_v_result);
    __Pyx_GOTREF(__pyx_v_self->_size);
    __Pyx_DECREF(__pyx_v_self->_size);
    __pyx_v_self->_size = __pyx_v_result;
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* "View.MemoryView":559
 * self._size = result
 *
 * return self._size # <<<<<<<<<<<<<<
 *
 * def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->_size);
  __pyx_r = __pyx_v_self->_size;
  goto __pyx_L0;
  /* "View.MemoryView":550
 * property size:
 * @cname('__pyx_memoryview_get_size')
 * def __get__(self): # <<<<<<<<<<<<<<
 * if self._size is None:
 * result = 1
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_result);
  __Pyx_XDECREF(__pyx_v_length);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":561
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
/* Cython-generated memoryview.__len__: returns view.shape[0] when ndim >= 1,
 * otherwise 0 (a 0-d view has no length).  Cannot raise, hence no error
 * label.  NOTE(review): auto-generated — do not hand-edit. */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  /* Thin wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("__len__", 0);
  /* "View.MemoryView":562
 *
 * def __len__(self):
 * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
 * return self.view.shape[0]
 *
 */
  __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":563
 * def __len__(self):
 * if self.view.ndim >= 1:
 * return self.view.shape[0] # <<<<<<<<<<<<<<
 *
 * return 0
 */
    __pyx_r = (__pyx_v_self->view.shape[0]);
    goto __pyx_L0;
  }
  /* "View.MemoryView":565
 * return self.view.shape[0]
 *
 * return 0 # <<<<<<<<<<<<<<
 *
 * def __repr__(self):
 */
  __pyx_r = 0;
  goto __pyx_L0;
  /* "View.MemoryView":561
 * return self._size
 *
 * def __len__(self): # <<<<<<<<<<<<<<
 * if self.view.ndim >= 1:
 * return self.view.shape[0]
 */
  /* function exit code */
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":567
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
/* Cython-generated memoryview.__repr__: formats
 * "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, id(self)).
 * NOTE(review): auto-generated — do not hand-edit. */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* Thin wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);
  /* "View.MemoryView":568
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
  /* Fetch self.base.__class__.__name__ into __pyx_t_1. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "View.MemoryView":569
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self)) # <<<<<<<<<<<<<<
 *
 * def __str__(self):
 */
  /* Call builtin id(self) into __pyx_t_3. */
  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_INCREF(((PyObject *)__pyx_v_self));
  PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "View.MemoryView":568
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
  /* Pack (name, id) and apply the %-format to the cached template string. */
  __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __pyx_t_1 = 0;
  __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":567
 * return 0
 *
 * def __repr__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self))
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":571
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
/* Cython-generated memoryview.__str__: formats
 * "<MemoryView of %r object>" % (self.base.__class__.__name__,).
 * NOTE(review): auto-generated — do not hand-edit. */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
  /* Thin wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__str__", 0);
  /* "View.MemoryView":572
 *
 * def __str__(self):
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
 *
 *
 */
  /* Fetch self.base.__class__.__name__, pack it into a 1-tuple, and apply
   * the %-format to the cached template string. */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":571
 * id(self))
 *
 * def __str__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 *
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":575
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
/* Cython-generated memoryview.is_c_contig(): extracts a __Pyx_memviewslice
 * from self and returns a Python bool saying whether it is C-contiguous
 * (row-major) across all ndim dimensions.  NOTE(review): auto-generated —
 * do not hand-edit. */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
  /* Thin wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_memviewslice *__pyx_v_mslice;
  __Pyx_memviewslice __pyx_v_tmp;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("is_c_contig", 0);
  /* "View.MemoryView":578
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
 * return slice_is_contig(mslice, 'C', self.view.ndim)
 *
 */
  /* tmp is stack storage the helper may fill; mslice points at the result. */
  __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
  /* "View.MemoryView":579
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp)
 * return slice_is_contig(mslice, 'C', self.view.ndim) # <<<<<<<<<<<<<<
 *
 * def is_f_contig(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":575
 *
 *
 * def is_c_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":581
* return slice_is_contig(mslice, 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
/* Cython-generated memoryview.is_f_contig(): same as is_c_contig() above but
 * tests Fortran (column-major, 'F') contiguity.  NOTE(review):
 * auto-generated — do not hand-edit. */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
  /* Thin wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_memviewslice *__pyx_v_mslice;
  __Pyx_memviewslice __pyx_v_tmp;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("is_f_contig", 0);
  /* "View.MemoryView":584
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
 * return slice_is_contig(mslice, 'F', self.view.ndim)
 *
 */
  /* tmp is stack storage the helper may fill; mslice points at the result. */
  __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
  /* "View.MemoryView":585
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp)
 * return slice_is_contig(mslice, 'F', self.view.ndim) # <<<<<<<<<<<<<<
 *
 * def copy(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 585; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":581
 * return slice_is_contig(mslice, 'C', self.view.ndim)
 *
 * def is_f_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":587
* return slice_is_contig(mslice, 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
/* Cython-generated memoryview.copy(): makes a new C-contiguous copy of the
 * data.  Clears PyBUF_F_CONTIGUOUS from the flags, forces PyBUF_C_CONTIGUOUS,
 * copies the slice into freshly allocated contiguous storage, and wraps it in
 * a new memoryview object.  NOTE(review): auto-generated — do not hand-edit. */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("copy (wrapper)", 0);
  /* Thin wrapper: cast self and delegate to the impl below. */
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_memviewslice __pyx_v_mslice;
  int __pyx_v_flags;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("copy", 0);
  /* "View.MemoryView":589
 * def copy(self):
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
 *
 * slice_copy(self, &mslice)
 */
  /* Drop the Fortran-contiguity request; the copy will be C-contiguous. */
  __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
  /* "View.MemoryView":591
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 *
 * slice_copy(self, &mslice) # <<<<<<<<<<<<<<
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
 * self.view.itemsize,
 */
  __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
  /* "View.MemoryView":592
 *
 * slice_copy(self, &mslice)
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
 * self.view.itemsize,
 * flags|PyBUF_C_CONTIGUOUS,
 */
  /* copy_new_contig allocates a new contiguous buffer ("c" order) and copies
   * the data; errors are signaled via the Python error indicator, hence the
   * PyErr_Occurred() check. */
  __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), __pyx_k_c, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_v_mslice = __pyx_t_1;
  /* "View.MemoryView":597
 * self.dtype_is_object)
 *
 * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
 *
 * def copy_fortran(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":587
 * return slice_is_contig(mslice, 'F', self.view.ndim)
 *
 * def copy(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":599
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
/* Cython-generated Python wrapper for memoryview.copy_fortran(): casts self
 * and delegates to the generated impl (which produces a Fortran-contiguous
 * copy).  NOTE(review): auto-generated — do not hand-edit. */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of memoryview.copy_fortran() (Cython-generated).
 * Makes a Fortran-contiguous copy of the underlying buffer:
 *   1. clears PyBUF_C_CONTIGUOUS from the view's flags,
 *   2. snapshots the current view into a C-level slice (slice_copy),
 *   3. allocates and fills a new F-contiguous buffer (copy_new_contig),
 *   4. wraps the new slice back into a Python memoryview object.
 * Returns a new reference, or NULL with an exception set on failure. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":601
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
/* Drop the C-contiguity requirement; F-contiguity is requested below. */
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":603
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":604
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
/* copy_new_contig reports failure via a raised exception, hence the
 * PyErr_Occurred() check instead of a sentinel return value. */
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), __pyx_k_fortran, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":609
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":599
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":613
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* C-level constructor for the memoryview type (cname '__pyx_memoryview_new').
 * Builds the (o, flags, dtype_is_object) argument tuple, calls the memoryview
 * type object, then stashes the caller-supplied typeinfo pointer on the new
 * instance (typeinfo cannot be passed through the Python-level __init__).
 * Returns a new reference, or 0 with an exception set on failure. */
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":614
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
/* Box the two C ints so they can go into the argument tuple. */
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
/* PyTuple_SET_ITEM steals the references; GIVEREF records the transfer. */
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
/* __pyx_t_2 is immediately reused to hold the constructed memoryview. */
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":615
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":616
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":613
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
/* Drop the local reference; on success __pyx_r holds its own INCREF'd one. */
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":619
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* isinstance(o, memoryview) for the Cython-internal memoryview type.
 * Pure type check (accepts subclasses via __Pyx_TypeCheck); never raises. */
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":620
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, ((PyObject *)__pyx_memoryview_type));
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":619
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":622
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* Normalize an indexing expression for an ndim-dimensional memoryview.
 * Wraps a non-tuple index in a 1-tuple, expands the first Ellipsis into
 * enough slice(None) entries to cover the missing dimensions (subsequent
 * Ellipses become a single slice(None)), rejects anything that is neither a
 * slice nor an integer index with TypeError, and pads the result with
 * slice(None) up to ndim entries.
 * Returns a new reference to the 2-tuple (have_slices_or_padding, indices)
 * where the first element is truthy iff any slice was seen or padding was
 * added; returns 0 with an exception set on failure. */
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":627
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":628
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 628; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":630
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":632
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":633
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":634
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":635
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
/* Inlined enumerate(): __pyx_t_3 carries the running index, __pyx_t_4 the
 * iterable (fast path for exact list/tuple, generic iterator otherwise). */
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
/* (0 < 0) is a generator artifact and is always false; no check occurs. */
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
/* StopIteration ends the loop cleanly; anything else propagates. */
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = PyNumber_Add(__pyx_t_3, __pyx_int_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":636
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":637
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":638
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
/* First Ellipsis: expand to (ndim - len(tup) + 1) full slices so the
 * final index list reaches exactly ndim entries. */
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__21);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__21);
__Pyx_GIVEREF(__pyx_slice__21);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":639
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
goto __pyx_L7;
}
/*else*/ {
/* "View.MemoryView":641
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/* Any Ellipsis after the first contributes just one full slice. */
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__22); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 641; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L7:;
/* "View.MemoryView":642
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":644
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":645
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_11);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_Raise(__pyx_t_7, 0, 0, 0);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":647
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":648
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L6:;
/* "View.MemoryView":635
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":650
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":651
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":652
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
/* Pad trailing dimensions that were not indexed with full slices. */
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__23);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__23);
__Pyx_GIVEREF(__pyx_slice__23);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L13;
}
__pyx_L13:;
/* "View.MemoryView":654
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
/* Python 'or' semantics: box have_slices if truthy, else box nslices. */
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_7);
__pyx_t_7 = 0;
goto __pyx_L0;
/* "View.MemoryView":622
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":656
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* Verify that all ndim dimensions are direct (suboffset < 0 in the PEP 3118
 * sense). A suboffset >= 0 marks an indirect (pointer-chasing) dimension,
 * which this memoryview implementation does not support. Returns None on
 * success, or 0 with ValueError set on the first indirect dimension. */
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":657
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
/* Pointer-walk over suboffsets[0..ndim-1]. */
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":658
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":659
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
/* "View.MemoryView":656
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":666
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":667
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":674
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)));
/* "View.MemoryView":678
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
#endif
/* "View.MemoryView":680
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":681
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":682
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":684
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":685
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":691
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":692
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":697
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":698
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":702
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":703
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":707
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":704
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
/* "View.MemoryView":710
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":711
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":712
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":713
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1;
/* "View.MemoryView":714
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":716
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":717
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":718
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":720
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 720; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":721
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":722
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 722; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":724
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":730
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":702
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":732
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":733
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":734
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
/* "View.MemoryView":735
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 735; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
/* "View.MemoryView":733
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 733; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 733; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":738
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":739
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 738; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":738
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 738; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":666
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":763
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* Cython auto-generated code (View.MemoryView utility "slice_memviewslice").
 * Do not hand-edit: regenerate from the .pyx/.pxd sources instead.
 *
 * Normalizes one indexing element (a single integer index when is_slice==0,
 * or a start/stop/step slice when is_slice!=0) for dimension `dim` of the
 * source memoryview, and writes the resulting shape/stride/suboffset for the
 * new dimension into dst->shape/strides/suboffsets[new_ndim].  The data
 * pointer (or the pending suboffset tracked via suboffset_dim) is advanced by
 * start*stride.  have_start/have_stop/have_step flag which slice components
 * were given explicitly.  Returns 0 on success, -1 after raising
 * IndexError/ValueError on error.  May be called without the GIL: the error
 * path re-acquires it before touching Python state. */
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* Error-location bookkeeping consumed by __Pyx_AddTraceback on the error path. */
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":783
 * cdef bint negative_step
 *
 * if not is_slice: # <<<<<<<<<<<<<<
 *
 * if start < 0:
 */
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* Integer-index case: `start` holds the index; wrap negatives, bounds-check. */
/* "View.MemoryView":785
 * if not is_slice:
 *
 * if start < 0: # <<<<<<<<<<<<<<
 * start += shape
 * if not 0 <= start < shape:
 */
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":786
 *
 * if start < 0:
 * start += shape # <<<<<<<<<<<<<<
 * if start < 0:
 * _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 */
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
goto __pyx_L4;
}
__pyx_L4:;
/* "View.MemoryView":787
 * if start < 0:
 * start += shape
 * if not 0 <= start < shape: # <<<<<<<<<<<<<<
 * _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 * else:
 */
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":788
 * start += shape
 * if not 0 <= start < shape:
 * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
 * else:
 *
 */
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
goto __pyx_L3;
}
/*else*/ {
/* Slice case: clamp start/stop into range and default step to 1,
 * mirroring PySlice_GetIndicesEx semantics. */
/* "View.MemoryView":791
 * else:
 *
 * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
 *
 * if have_step and step == 0:
 */
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":793
 * negative_step = have_step != 0 and step < 0
 *
 * if have_step and step == 0: # <<<<<<<<<<<<<<
 * _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
 *
 */
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":794
 *
 * if have_step and step == 0:
 * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
/* "View.MemoryView":797
 *
 *
 * if have_start: # <<<<<<<<<<<<<<
 * if start < 0:
 * start += shape
 */
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":798
 *
 * if have_start:
 * if start < 0: # <<<<<<<<<<<<<<
 * start += shape
 * if start < 0:
 */
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":799
 * if have_start:
 * if start < 0:
 * start += shape # <<<<<<<<<<<<<<
 * if start < 0:
 * start = 0
 */
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":800
 * if start < 0:
 * start += shape
 * if start < 0: # <<<<<<<<<<<<<<
 * start = 0
 * elif start >= shape:
 */
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":801
 * start += shape
 * if start < 0:
 * start = 0 # <<<<<<<<<<<<<<
 * elif start >= shape:
 * if negative_step:
 */
__pyx_v_start = 0;
goto __pyx_L13;
}
__pyx_L13:;
goto __pyx_L12;
}
/* "View.MemoryView":802
 * if start < 0:
 * start = 0
 * elif start >= shape: # <<<<<<<<<<<<<<
 * if negative_step:
 * start = shape - 1
 */
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":803
 * start = 0
 * elif start >= shape:
 * if negative_step: # <<<<<<<<<<<<<<
 * start = shape - 1
 * else:
 */
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":804
 * elif start >= shape:
 * if negative_step:
 * start = shape - 1 # <<<<<<<<<<<<<<
 * else:
 * start = shape
 */
__pyx_v_start = (__pyx_v_shape - 1);
goto __pyx_L14;
}
/*else*/ {
/* "View.MemoryView":806
 * start = shape - 1
 * else:
 * start = shape # <<<<<<<<<<<<<<
 * else:
 * if negative_step:
 */
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
goto __pyx_L12;
}
__pyx_L12:;
goto __pyx_L11;
}
/*else*/ {
/* No explicit start: default depends on step direction. */
/* "View.MemoryView":808
 * start = shape
 * else:
 * if negative_step: # <<<<<<<<<<<<<<
 * start = shape - 1
 * else:
 */
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":809
 * else:
 * if negative_step:
 * start = shape - 1 # <<<<<<<<<<<<<<
 * else:
 * start = 0
 */
__pyx_v_start = (__pyx_v_shape - 1);
goto __pyx_L15;
}
/*else*/ {
/* "View.MemoryView":811
 * start = shape - 1
 * else:
 * start = 0 # <<<<<<<<<<<<<<
 *
 * if have_stop:
 */
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":813
 * start = 0
 *
 * if have_stop: # <<<<<<<<<<<<<<
 * if stop < 0:
 * stop += shape
 */
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":814
 *
 * if have_stop:
 * if stop < 0: # <<<<<<<<<<<<<<
 * stop += shape
 * if stop < 0:
 */
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":815
 * if have_stop:
 * if stop < 0:
 * stop += shape # <<<<<<<<<<<<<<
 * if stop < 0:
 * stop = 0
 */
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":816
 * if stop < 0:
 * stop += shape
 * if stop < 0: # <<<<<<<<<<<<<<
 * stop = 0
 * elif stop > shape:
 */
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":817
 * stop += shape
 * if stop < 0:
 * stop = 0 # <<<<<<<<<<<<<<
 * elif stop > shape:
 * stop = shape
 */
__pyx_v_stop = 0;
goto __pyx_L18;
}
__pyx_L18:;
goto __pyx_L17;
}
/* "View.MemoryView":818
 * if stop < 0:
 * stop = 0
 * elif stop > shape: # <<<<<<<<<<<<<<
 * stop = shape
 * else:
 */
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":819
 * stop = 0
 * elif stop > shape:
 * stop = shape # <<<<<<<<<<<<<<
 * else:
 * if negative_step:
 */
__pyx_v_stop = __pyx_v_shape;
goto __pyx_L17;
}
__pyx_L17:;
goto __pyx_L16;
}
/*else*/ {
/* No explicit stop: -1 for a descending slice (one before index 0),
 * shape for an ascending one. */
/* "View.MemoryView":821
 * stop = shape
 * else:
 * if negative_step: # <<<<<<<<<<<<<<
 * stop = -1
 * else:
 */
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":822
 * else:
 * if negative_step:
 * stop = -1 # <<<<<<<<<<<<<<
 * else:
 * stop = shape
 */
__pyx_v_stop = -1;
goto __pyx_L19;
}
/*else*/ {
/* "View.MemoryView":824
 * stop = -1
 * else:
 * stop = shape # <<<<<<<<<<<<<<
 *
 * if not have_step:
 */
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":826
 * stop = shape
 *
 * if not have_step: # <<<<<<<<<<<<<<
 * step = 1
 *
 */
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":827
 *
 * if not have_step:
 * step = 1 # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_step = 1;
goto __pyx_L20;
}
__pyx_L20:;
/* Ceiling division of (stop-start)/step, via cdivision plus remainder fixup. */
/* "View.MemoryView":831
 *
 * with cython.cdivision(True):
 * new_shape = (stop - start) // step # <<<<<<<<<<<<<<
 *
 * if (stop - start) - step * new_shape:
 */
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":833
 * new_shape = (stop - start) // step
 *
 * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
 * new_shape += 1
 *
 */
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":834
 *
 * if (stop - start) - step * new_shape:
 * new_shape += 1 # <<<<<<<<<<<<<<
 *
 * if new_shape < 0:
 */
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
goto __pyx_L21;
}
__pyx_L21:;
/* "View.MemoryView":836
 * new_shape += 1
 *
 * if new_shape < 0: # <<<<<<<<<<<<<<
 * new_shape = 0
 *
 */
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":837
 *
 * if new_shape < 0:
 * new_shape = 0 # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_new_shape = 0;
goto __pyx_L22;
}
__pyx_L22:;
/* Publish the normalized dimension into the destination slice struct. */
/* "View.MemoryView":840
 *
 *
 * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
 * dst.shape[new_ndim] = new_shape
 * dst.suboffsets[new_ndim] = suboffset
 */
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":841
 *
 * dst.strides[new_ndim] = stride * step
 * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
 * dst.suboffsets[new_ndim] = suboffset
 *
 */
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":842
 * dst.strides[new_ndim] = stride * step
 * dst.shape[new_ndim] = new_shape
 * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
 *
 *
 */
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* Advance the data pointer directly, unless an earlier dimension carries an
 * unresolved suboffset — then the offset is deferred into that suboffset. */
/* "View.MemoryView":845
 *
 *
 * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
 * dst.data += start * stride
 * else:
 */
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":846
 *
 * if suboffset_dim[0] < 0:
 * dst.data += start * stride # <<<<<<<<<<<<<<
 * else:
 * dst.suboffsets[suboffset_dim[0]] += start * stride
 */
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
goto __pyx_L23;
}
/*else*/ {
/* "View.MemoryView":848
 * dst.data += start * stride
 * else:
 * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
 *
 * if suboffset >= 0:
 */
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":850
 * dst.suboffsets[suboffset_dim[0]] += start * stride
 *
 * if suboffset >= 0: # <<<<<<<<<<<<<<
 * if not is_slice:
 * if new_ndim == 0:
 */
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":851
 *
 * if suboffset >= 0:
 * if not is_slice: # <<<<<<<<<<<<<<
 * if new_ndim == 0:
 * dst.data = (<char **> dst.data)[0] + suboffset
 */
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":852
 * if suboffset >= 0:
 * if not is_slice:
 * if new_ndim == 0: # <<<<<<<<<<<<<<
 * dst.data = (<char **> dst.data)[0] + suboffset
 * else:
 */
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
 * if not is_slice:
 * if new_ndim == 0:
 * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
 * else:
 * _err_dim(IndexError, "All dimensions preceding dimension %d "
 */
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
goto __pyx_L26;
}
/*else*/ {
/* "View.MemoryView":855
 * dst.data = (<char **> dst.data)[0] + suboffset
 * else:
 * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
 * "must be indexed and not sliced", dim)
 * else:
 */
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 855; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L26:;
goto __pyx_L25;
}
/*else*/ {
/* "View.MemoryView":858
 * "must be indexed and not sliced", dim)
 * else:
 * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
 *
 * return 0
 */
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
goto __pyx_L24;
}
__pyx_L24:;
/* "View.MemoryView":860
 * suboffset_dim[0] = new_ndim
 *
 * return 0 # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":763
 *
 * @cname('__pyx_memoryview_slice_memviewslice')
 * cdef int slice_memviewslice( # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *dst,
 * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
 */
/* function exit code */
__pyx_L1_error:;
{
/* The function may run without the GIL, so re-acquire it before recording
 * the traceback for the raised exception. */
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":866
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* Cython auto-generated code (View.MemoryView utility "pybuffer_index").
 * Do not hand-edit: regenerate from the .pyx/.pxd sources instead.
 *
 * Resolves a single integer index along dimension `dim` of a Py_buffer:
 * wraps a negative index, bounds-checks it against the dimension's shape,
 * then returns bufp + index*stride, following one level of indirection when
 * the buffer has a non-negative suboffset for that dimension.  A 0-d buffer
 * is treated as a flat array of view.len/itemsize elements with
 * stride == itemsize.  Returns NULL with IndexError set when the index is
 * out of bounds (the function is declared `except NULL` in the .pyx). */
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
/* Error-location bookkeeping consumed by __Pyx_AddTraceback on the error path. */
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":868
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
 * Py_ssize_t dim) except NULL:
 * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
 * cdef Py_ssize_t itemsize = view.itemsize
 * cdef char *resultp
 */
__pyx_v_suboffset = -1;
/* "View.MemoryView":869
 * Py_ssize_t dim) except NULL:
 * cdef Py_ssize_t shape, stride, suboffset = -1
 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
 * cdef char *resultp
 *
 */
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":872
 * cdef char *resultp
 *
 * if view.ndim == 0: # <<<<<<<<<<<<<<
 * shape = view.len / itemsize
 * stride = itemsize
 */
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* 0-d buffer: derive an effective shape from the total byte length.
 * The generated guards below raise instead of invoking UB for the
 * itemsize == 0 and PY_SSIZE_T_MIN / -1 division corner cases. */
/* "View.MemoryView":873
 *
 * if view.ndim == 0:
 * shape = view.len / itemsize # <<<<<<<<<<<<<<
 * stride = itemsize
 * else:
 */
if (unlikely(__pyx_v_itemsize == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":874
 * if view.ndim == 0:
 * shape = view.len / itemsize
 * stride = itemsize # <<<<<<<<<<<<<<
 * else:
 * shape = view.shape[dim]
 */
__pyx_v_stride = __pyx_v_itemsize;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":876
 * stride = itemsize
 * else:
 * shape = view.shape[dim] # <<<<<<<<<<<<<<
 * stride = view.strides[dim]
 * if view.suboffsets != NULL:
 */
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":877
 * else:
 * shape = view.shape[dim]
 * stride = view.strides[dim] # <<<<<<<<<<<<<<
 * if view.suboffsets != NULL:
 * suboffset = view.suboffsets[dim]
 */
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":878
 * shape = view.shape[dim]
 * stride = view.strides[dim]
 * if view.suboffsets != NULL: # <<<<<<<<<<<<<<
 * suboffset = view.suboffsets[dim]
 *
 */
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":879
 * stride = view.strides[dim]
 * if view.suboffsets != NULL:
 * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
 *
 * if index < 0:
 */
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
goto __pyx_L4;
}
__pyx_L4:;
}
__pyx_L3:;
/* Python-style negative indexing: wrap once, then reject if still negative. */
/* "View.MemoryView":881
 * suboffset = view.suboffsets[dim]
 *
 * if index < 0: # <<<<<<<<<<<<<<
 * index += view.shape[dim]
 * if index < 0:
 */
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":882
 *
 * if index < 0:
 * index += view.shape[dim] # <<<<<<<<<<<<<<
 * if index < 0:
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 */
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":883
 * if index < 0:
 * index += view.shape[dim]
 * if index < 0: # <<<<<<<<<<<<<<
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 *
 */
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":884
 * index += view.shape[dim]
 * if index < 0:
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
 *
 * if index >= shape:
 */
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
goto __pyx_L5;
}
__pyx_L5:;
/* "View.MemoryView":886
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 *
 * if index >= shape: # <<<<<<<<<<<<<<
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 *
 */
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":887
 *
 * if index >= shape:
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
 *
 * resultp = bufp + index * stride
 */
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":889
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 *
 * resultp = bufp + index * stride # <<<<<<<<<<<<<<
 * if suboffset >= 0:
 * resultp = (<char **> resultp)[0] + suboffset
 */
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* Indirect (PIL-style) buffer: the computed slot holds a pointer; follow it
 * and add the suboffset to reach the actual data. */
/* "View.MemoryView":890
 *
 * resultp = bufp + index * stride
 * if suboffset >= 0: # <<<<<<<<<<<<<<
 * resultp = (<char **> resultp)[0] + suboffset
 *
 */
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":891
 * resultp = bufp + index * stride
 * if suboffset >= 0:
 * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
 *
 * return resultp
 */
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
goto __pyx_L8;
}
__pyx_L8:;
/* "View.MemoryView":893
 * resultp = (<char **> resultp)[0] + suboffset
 *
 * return resultp # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":866
 *
 * @cname('__pyx_pybuffer_index')
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
 * Py_ssize_t dim) except NULL:
 * cdef Py_ssize_t shape, stride, suboffset = -1
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":899
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* NOTE(review): Cython-generated code (View.MemoryView:899, transpose_memslice).
 * Reverses the slice's shape[] and strides[] arrays in place by swapping
 * entries i and ndim-1-i for the first ndim/2 indices. If any dimension
 * touched by a swap has suboffset >= 0 (an indirect dimension), it raises
 * ValueError via __pyx_memoryview_err. Returns 1 on success, 0 on error
 * (the nogil "except 0" convention: the GIL is re-acquired only to record
 * the traceback on the error path). Do not hand-edit: regenerate from the
 * .pyx instead. */
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":900
 * @cname('__pyx_memslice_transpose')
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
 *     cdef int ndim = memslice.memview.view.ndim             # <<<<<<<<<<<<<<
 *
 *     cdef Py_ssize_t *shape = memslice.shape
 */
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":902
 * cdef int ndim = memslice.memview.view.ndim
 *
 *     cdef Py_ssize_t *shape = memslice.shape             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t *strides = memslice.strides
 *
 */
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":903
 *
 *     cdef Py_ssize_t *shape = memslice.shape
 *     cdef Py_ssize_t *strides = memslice.strides             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":907
 *
 *     cdef int i, j
 *     for i in range(ndim / 2):             # <<<<<<<<<<<<<<
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]
 */
/* __Pyx_div_long implements Python floor division for the loop bound. */
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":908
 *     cdef int i, j
 *     for i in range(ndim / 2):
 *         j = ndim - 1 - i             # <<<<<<<<<<<<<<
 *         strides[i], strides[j] = strides[j], strides[i]
 *         shape[i], shape[j] = shape[j], shape[i]
 */
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":909
 *     for i in range(ndim / 2):
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]             # <<<<<<<<<<<<<<
 *         shape[i], shape[j] = shape[j], shape[i]
 *
 */
/* Tuple-swap lowered to two temporaries: both RHS values are read before
 * either LHS slot is written. */
__pyx_t_4 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_4;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":910
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]
 *         shape[i], shape[j] = shape[j], shape[i]             # <<<<<<<<<<<<<<
 *
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
 */
__pyx_t_5 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_4 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_4;
/* "View.MemoryView":912
 *         shape[i], shape[j] = shape[j], shape[i]
 *
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:             # <<<<<<<<<<<<<<
 *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
 *
 */
/* Short-circuit `or`: the second operand is evaluated only when the first
 * is false. */
__pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L6_bool_binop_done:;
if (__pyx_t_6) {
/* "View.MemoryView":913
 *
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
 *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")             # <<<<<<<<<<<<<<
 *
 *     return 1
 */
__pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, __pyx_k_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 913; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
}
/* "View.MemoryView":915
 *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
 *
 *     return 1             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":899
 *
 * @cname('__pyx_memslice_transpose')
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:             # <<<<<<<<<<<<<<
 *     cdef int ndim = memslice.memview.view.ndim
 *
 */
/* function exit code */
__pyx_L1_error:;
/* This function runs nogil, so the GIL must be acquired before recording
 * the Python-level traceback. */
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":932
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
/* NOTE(review): Cython-generated Python wrapper for _memoryviewslice.__dealloc__
 * (View.MemoryView:932). It only casts the PyObject* to the concrete struct
 * type and forwards to the real implementation below; RefNanny bookkeeping
 * is active only in traced/debug builds. */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* NOTE(review): Cython-generated body of _memoryviewslice.__dealloc__
 * (View.MemoryView:932-933). Releases the wrapped slice's reference to its
 * backing memoryview via __PYX_XDEC_MEMVIEW (the second argument, 1, is the
 * generator's "have_gil" flag — presumably; confirm against the matching
 * __PYX_INC_MEMVIEW call sites). */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":933
 *
 *     def __dealloc__(self):
 *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)             # <<<<<<<<<<<<<<
 *
 *     cdef convert_item_to_object(self, char *itemp):
 */
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":932
 *     cdef int (*to_dtype_func)(char *, object) except 0
 *
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 *
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":935
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* NOTE(review): Cython-generated _memoryviewslice.convert_item_to_object
 * (View.MemoryView:935). Converts the raw item at `itemp` to a Python
 * object: uses the slice's dtype-specific to_object_func when one was
 * installed, otherwise falls back to the base memoryview class's generic
 * converter. Returns a new reference, or NULL with an exception set. */
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":936
 *
 *     cdef convert_item_to_object(self, char *itemp):
 *         if self.to_object_func != NULL:             # <<<<<<<<<<<<<<
 *             return self.to_object_func(itemp)
 *         else:
 */
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":937
 *     cdef convert_item_to_object(self, char *itemp):
 *         if self.to_object_func != NULL:
 *             return self.to_object_func(itemp)             # <<<<<<<<<<<<<<
 *         else:
 *             return memoryview.convert_item_to_object(self, itemp)
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 937; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":939
 *             return self.to_object_func(itemp)
 *         else:
 *             return memoryview.convert_item_to_object(self, itemp)             # <<<<<<<<<<<<<<
 *
 *     cdef assign_item_from_object(self, char *itemp, object value):
 */
/* Unbound base-class call: dispatches directly to the base memoryview
 * implementation, not through the vtable. */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 939; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":935
 *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 *
 *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
 *         if self.to_object_func != NULL:
 *             return self.to_object_func(itemp)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":941
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* NOTE(review): Cython-generated _memoryviewslice.assign_item_from_object
 * (View.MemoryView:941). Writes the Python object `value` into the raw item
 * slot at `itemp`: uses the dtype-specific to_dtype_func when installed
 * (its 0 return signals an error, per the "except 0" declaration in HEAD's
 * comment trail), otherwise delegates to the base memoryview's generic
 * assignment. Returns Py_None on success, NULL on error. */
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":942
 *
 *     cdef assign_item_from_object(self, char *itemp, object value):
 *         if self.to_dtype_func != NULL:             # <<<<<<<<<<<<<<
 *             self.to_dtype_func(itemp, value)
 *         else:
 */
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":943
 *     cdef assign_item_from_object(self, char *itemp, object value):
 *         if self.to_dtype_func != NULL:
 *             self.to_dtype_func(itemp, value)             # <<<<<<<<<<<<<<
 *         else:
 *             memoryview.assign_item_from_object(self, itemp, value)
 */
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":945
 *             self.to_dtype_func(itemp, value)
 *         else:
 *             memoryview.assign_item_from_object(self, itemp, value)             # <<<<<<<<<<<<<<
 *
 *     property base:
 */
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 945; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":941
 *             return memoryview.convert_item_to_object(self, itemp)
 *
 *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
 *         if self.to_dtype_func != NULL:
 *             self.to_dtype_func(itemp, value)
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":949
* property base:
* @cname('__pyx_memoryviewslice__get__base')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
/* NOTE(review): Cython-generated Python wrapper for the _memoryviewslice
 * `base` property getter (View.MemoryView:949). Casts self to the concrete
 * struct and forwards to the implementation below. */
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-generated body of the _memoryviewslice `base` property
 * (View.MemoryView:949-950). Simply returns a new reference to
 * self.from_object, the original object the slice was created from.
 * Cannot fail, hence no error label. */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":950
 *         @cname('__pyx_memoryviewslice__get__base')
 *         def __get__(self):
 *             return self.from_object             # <<<<<<<<<<<<<<
 *
 *     __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":949
 *     property base:
 *         @cname('__pyx_memoryviewslice__get__base')
 *         def __get__(self):             # <<<<<<<<<<<<<<
 *             return self.from_object
 *
 */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":956
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* NOTE(review): Cython-generated memoryview_fromslice (View.MemoryView:956).
 * Builds a new _memoryviewslice Python object that wraps the C-level slice
 * `memviewslice`:
 *   - returns None when the slice has no backing memview;
 *   - otherwise allocates _memoryviewslice(None, 0, dtype_is_object),
 *     copies the slice in, bumps the slice's acquisition count, inherits
 *     base/typeinfo/view from the source memview, then repoints view.buf,
 *     ndim, shape, strides, suboffsets and len at the copied slice data;
 *   - installs the dtype conversion callbacks and returns the new object
 *     (NULL on error). Heavy refcount choreography — do not hand-edit. */
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":964
 *     cdef _memoryviewslice result
 *
 *     if <PyObject *> memviewslice.memview == Py_None:             # <<<<<<<<<<<<<<
 *         return None
 *
 */
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":965
 *
 *     if <PyObject *> memviewslice.memview == Py_None:
 *         return None             # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
goto __pyx_L0;
}
/* "View.MemoryView":970
 *
 *
 *     result = _memoryviewslice(None, 0, dtype_is_object)             # <<<<<<<<<<<<<<
 *
 *     result.from_slice = memviewslice
 */
/* Constructor call: builds the 3-tuple (None, 0, dtype_is_object) and
 * invokes the _memoryviewslice type. */
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 970; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 970; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_INCREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryviewslice_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 970; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":972
 *     result = _memoryviewslice(None, 0, dtype_is_object)
 *
 *     result.from_slice = memviewslice             # <<<<<<<<<<<<<<
 *     __PYX_INC_MEMVIEW(&memviewslice, 1)
 *
 */
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":973
 *
 *     result.from_slice = memviewslice
 *     __PYX_INC_MEMVIEW(&memviewslice, 1)             # <<<<<<<<<<<<<<
 *
 *     result.from_object = (<memoryview> memviewslice.memview).base
 */
/* Acquire a slice-level reference to the backing memview; released by
 * __dealloc__ via __PYX_XDEC_MEMVIEW. */
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":975
 *     __PYX_INC_MEMVIEW(&memviewslice, 1)
 *
 *     result.from_object = (<memoryview> memviewslice.memview).base             # <<<<<<<<<<<<<<
 *     result.typeinfo = memviewslice.memview.typeinfo
 *
 */
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 975; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":976
 *
 *     result.from_object = (<memoryview> memviewslice.memview).base
 *     result.typeinfo = memviewslice.memview.typeinfo             # <<<<<<<<<<<<<<
 *
 *     result.view = memviewslice.memview.view
 */
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":978
 *     result.typeinfo = memviewslice.memview.typeinfo
 *
 *     result.view = memviewslice.memview.view             # <<<<<<<<<<<<<<
 *     result.view.buf = <void *> memviewslice.data
 *     result.view.ndim = ndim
 */
/* Struct copy of the whole Py_buffer; individual fields are then
 * overwritten below to describe the slice rather than the source. */
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":979
 *
 *     result.view = memviewslice.memview.view
 *     result.view.buf = <void *> memviewslice.data             # <<<<<<<<<<<<<<
 *     result.view.ndim = ndim
 *     (<__pyx_buffer *> &result.view).obj = Py_None
 */
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":980
 *     result.view = memviewslice.memview.view
 *     result.view.buf = <void *> memviewslice.data
 *     result.view.ndim = ndim             # <<<<<<<<<<<<<<
 *     (<__pyx_buffer *> &result.view).obj = Py_None
 *     Py_INCREF(Py_None)
 */
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":981
 *     result.view.buf = <void *> memviewslice.data
 *     result.view.ndim = ndim
 *     (<__pyx_buffer *> &result.view).obj = Py_None             # <<<<<<<<<<<<<<
 *     Py_INCREF(Py_None)
 *
 */
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":982
 *     result.view.ndim = ndim
 *     (<__pyx_buffer *> &result.view).obj = Py_None
 *     Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
 *
 *     result.flags = PyBUF_RECORDS
 */
/* Balances the Py_None stored into view.obj just above. */
Py_INCREF(Py_None);
/* "View.MemoryView":984
 *     Py_INCREF(Py_None)
 *
 *     result.flags = PyBUF_RECORDS             # <<<<<<<<<<<<<<
 *
 *     result.view.shape = <Py_ssize_t *> result.from_slice.shape
 */
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":986
 *     result.flags = PyBUF_RECORDS
 *
 *     result.view.shape = <Py_ssize_t *> result.from_slice.shape             # <<<<<<<<<<<<<<
 *     result.view.strides = <Py_ssize_t *> result.from_slice.strides
 *
 */
/* shape/strides must point into result's OWN from_slice copy (not the
 * stack-local parameter, which dies when this function returns). */
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":987
 *
 *     result.view.shape = <Py_ssize_t *> result.from_slice.shape
 *     result.view.strides = <Py_ssize_t *> result.from_slice.strides             # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":990
 *
 *
 *     result.view.suboffsets = NULL             # <<<<<<<<<<<<<<
 *     for suboffset in result.from_slice.suboffsets[:ndim]:
 *         if suboffset >= 0:
 */
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":991
 *
 *     result.view.suboffsets = NULL
 *     for suboffset in result.from_slice.suboffsets[:ndim]:             # <<<<<<<<<<<<<<
 *         if suboffset >= 0:
 *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 */
/* Only expose suboffsets if at least one dimension actually uses one. */
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":992
 *     result.view.suboffsets = NULL
 *     for suboffset in result.from_slice.suboffsets[:ndim]:
 *         if suboffset >= 0:             # <<<<<<<<<<<<<<
 *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 *             break
 */
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":993
 *     for suboffset in result.from_slice.suboffsets[:ndim]:
 *         if suboffset >= 0:
 *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets             # <<<<<<<<<<<<<<
 *             break
 *
 */
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":994
 *         if suboffset >= 0:
 *             result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 *             break             # <<<<<<<<<<<<<<
 *
 *     result.view.len = result.view.itemsize
 */
goto __pyx_L5_break;
}
}
__pyx_L5_break:;
/* "View.MemoryView":996
 *             break
 *
 *     result.view.len = result.view.itemsize             # <<<<<<<<<<<<<<
 *     for length in result.view.shape[:ndim]:
 *         result.view.len *= length
 */
/* Recompute view.len = itemsize * product(shape) for the slice. */
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":997
 *
 *     result.view.len = result.view.itemsize
 *     for length in result.view.shape[:ndim]:             # <<<<<<<<<<<<<<
 *         result.view.len *= length
 *
 */
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 997; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":998
 *     result.view.len = result.view.itemsize
 *     for length in result.view.shape[:ndim]:
 *         result.view.len *= length             # <<<<<<<<<<<<<<
 *
 *     result.to_object_func = to_object_func
 */
/* The multiply goes through Python objects because `length` is a typed
 * Python loop variable in the .pyx source. */
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1000
 *         result.view.len *= length
 *
 *     result.to_object_func = to_object_func             # <<<<<<<<<<<<<<
 *     result.to_dtype_func = to_dtype_func
 *
 */
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1001
 *
 *     result.to_object_func = to_object_func
 *     result.to_dtype_func = to_dtype_func             # <<<<<<<<<<<<<<
 *
 *     return result
 */
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1003
 *     result.to_dtype_func = to_dtype_func
 *
 *     return result             # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":956
 *
 * @cname('__pyx_memoryview_fromslice')
 * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice,             # <<<<<<<<<<<<<<
 *                           int ndim,
 *                           object (*to_object_func)(char *),
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1006
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
*/
/* NOTE(review): Cython-generated get_slice_from_memview (View.MemoryView:1006).
 * Returns a pointer to a C-level slice describing `memview`: if the object
 * is already a _memoryviewslice, returns the address of its embedded
 * from_slice; otherwise fills the caller-provided `mslice` via slice_copy
 * and returns it. Errors cannot propagate (cdef with no except clause), so
 * failures are reported with __Pyx_WriteUnraisable and NULL is returned. */
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1009
 *                                                    __Pyx_memviewslice *mslice):
 *     cdef _memoryviewslice obj
 *     if isinstance(memview, _memoryviewslice):             # <<<<<<<<<<<<<<
 *         obj = memview
 *         return &obj.from_slice
 */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1010
 *     cdef _memoryviewslice obj
 *     if isinstance(memview, _memoryviewslice):
 *         obj = memview             # <<<<<<<<<<<<<<
 *         return &obj.from_slice
 *     else:
 */
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1011
 *     if isinstance(memview, _memoryviewslice):
 *         obj = memview
 *         return &obj.from_slice             # <<<<<<<<<<<<<<
 *     else:
 *         slice_copy(memview, mslice)
 */
/* Returned pointer aliases the object's own storage; it remains valid
 * only while the caller holds a reference to `memview`. */
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":1013
 *         return &obj.from_slice
 *     else:
 *         slice_copy(memview, mslice)             # <<<<<<<<<<<<<<
 *         return mslice
 *
 */
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1014
 *     else:
 *         slice_copy(memview, mslice)
 *         return mslice             # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_slice_copy')
 */
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1006
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview,             # <<<<<<<<<<<<<<
 *                                                    __Pyx_memviewslice *mslice):
 *     cdef _memoryviewslice obj
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1017
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* NOTE(review): Cython-generated slice_copy (View.MemoryView:1017).
 * Fills `dst` (a C-level __Pyx_memviewslice) from the Py_buffer stored in
 * `memview`: copies the memview pointer and data pointer, then per dimension
 * copies shape and strides, substituting -1 for suboffsets when the buffer
 * exposes no suboffsets array. Note: does NOT take any new reference to
 * `memview` — the caller owns lifetime management. Cannot fail (void). */
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1021
 *     cdef (Py_ssize_t*) shape, strides, suboffsets
 *
 *     shape = memview.view.shape             # <<<<<<<<<<<<<<
 *     strides = memview.view.strides
 *     suboffsets = memview.view.suboffsets
 */
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1022
 *
 *     shape = memview.view.shape
 *     strides = memview.view.strides             # <<<<<<<<<<<<<<
 *     suboffsets = memview.view.suboffsets
 *
 */
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1023
 *     shape = memview.view.shape
 *     strides = memview.view.strides
 *     suboffsets = memview.view.suboffsets             # <<<<<<<<<<<<<<
 *
 *     dst.memview = <__pyx_memoryview *> memview
 */
/* May be NULL for buffers without indirect dimensions (see the loop). */
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1025
 *     suboffsets = memview.view.suboffsets
 *
 *     dst.memview = <__pyx_memoryview *> memview             # <<<<<<<<<<<<<<
 *     dst.data = <char *> memview.view.buf
 *
 */
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1026
 *
 *     dst.memview = <__pyx_memoryview *> memview
 *     dst.data = <char *> memview.view.buf             # <<<<<<<<<<<<<<
 *
 *     for dim in range(memview.view.ndim):
 */
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1028
 *     dst.data = <char *> memview.view.buf
 *
 *     for dim in range(memview.view.ndim):             # <<<<<<<<<<<<<<
 *         dst.shape[dim] = shape[dim]
 *         dst.strides[dim] = strides[dim]
 */
__pyx_t_2 = __pyx_v_memview->view.ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_dim = __pyx_t_3;
/* "View.MemoryView":1029
 *
 *     for dim in range(memview.view.ndim):
 *         dst.shape[dim] = shape[dim]             # <<<<<<<<<<<<<<
 *         dst.strides[dim] = strides[dim]
 *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
 */
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1030
 *     for dim in range(memview.view.ndim):
 *         dst.shape[dim] = shape[dim]
 *         dst.strides[dim] = strides[dim]             # <<<<<<<<<<<<<<
 *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
 *
 */
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1031
 *         dst.shape[dim] = shape[dim]
 *         dst.strides[dim] = strides[dim]
 *         dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1             # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_copy_object')
 */
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_4 = -1;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4;
}
/* "View.MemoryView":1017
 *
 * @cname('__pyx_memoryview_slice_copy')
 * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst):             # <<<<<<<<<<<<<<
 *     cdef int dim
 *     cdef (Py_ssize_t*) shape, strides, suboffsets
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1034
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* NOTE(review): Cython-generated memoryview_copy (View.MemoryView:1034).
 * "Create a new memoryview object": snapshots `memview` into a stack-local
 * C slice via slice_copy, then delegates to memoryview_copy_from_slice to
 * build and return the new Python-level object. Returns NULL on error. */
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1037
 *     "Create a new memoryview object"
 *     cdef __Pyx_memviewslice memviewslice
 *     slice_copy(memview, &memviewslice)             # <<<<<<<<<<<<<<
 *     return memoryview_copy_from_slice(memview, &memviewslice)
 *
 */
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1038
 *     cdef __Pyx_memviewslice memviewslice
 *     slice_copy(memview, &memviewslice)
 *     return memoryview_copy_from_slice(memview, &memviewslice)             # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1034
 *
 * @cname('__pyx_memoryview_copy_object')
 * cdef memoryview_copy(memoryview memview):             # <<<<<<<<<<<<<<
 *     "Create a new memoryview object"
 *     cdef __Pyx_memviewslice memviewslice
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1041
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* Generated from "View.MemoryView":1041 (cdef memoryview_copy_from_slice).
 * Build a new memoryview object wrapping the already-populated slice
 * *memviewslice, inheriting ndim and dtype information from `memview`.
 * Returns a new reference, or 0 (NULL) on error with a Python exception set. */
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
  PyObject *(*__pyx_v_to_object_func)(char *);
  int (*__pyx_v_to_dtype_func)(char *, PyObject *);
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *(*__pyx_t_3)(char *);
  int (*__pyx_t_4)(char *, PyObject *);
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
  /* isinstance(memview, _memoryviewslice): typed slices carry their own
   * element<->object conversion callbacks; propagate them to the copy. */
  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
    __pyx_v_to_object_func = __pyx_t_3;
    __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
    __pyx_v_to_dtype_func = __pyx_t_4;
    goto __pyx_L3;
  }
  /*else*/ {
    /* Plain memoryview: no per-dtype conversion callbacks. */
    __pyx_v_to_object_func = NULL;
    __pyx_v_to_dtype_func = NULL;
  }
  __pyx_L3:;
  /* return memoryview_fromslice(memviewslice[0], memview.view.ndim, ...) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1055; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_r = __pyx_t_5;
  __pyx_t_5 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":1063
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* Generated from "View.MemoryView":1063 (cdef Py_ssize_t abs_py_ssize_t nogil).
 * Absolute value of a Py_ssize_t. */
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
  return (__pyx_v_arg < 0) ? -__pyx_v_arg : __pyx_v_arg;
}
/* "View.MemoryView":1070
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* Generated from "View.MemoryView":1070 (cdef char get_best_order nogil).
 * Decide whether the slice is cheaper to traverse in C order ('C') or
 * Fortran order ('F') by comparing the magnitudes of the innermost
 * strides in each ordering.  Dimensions of extent <= 1 are ignored
 * because their stride is irrelevant to traversal cost. */
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
  Py_ssize_t c_stride = 0;
  Py_ssize_t f_stride = 0;
  int dim;

  /* Last dimension with extent > 1 gives the effective C-order stride. */
  for (dim = __pyx_v_ndim - 1; dim >= 0; dim--) {
    if (__pyx_v_mslice->shape[dim] > 1) {
      c_stride = __pyx_v_mslice->strides[dim];
      break;
    }
  }

  /* First dimension with extent > 1 gives the effective F-order stride. */
  for (dim = 0; dim < __pyx_v_ndim; dim++) {
    if (__pyx_v_mslice->shape[dim] > 1) {
      f_stride = __pyx_v_mslice->strides[dim];
      break;
    }
  }

  /* Ties go to C order, matching the original generated code. */
  return (abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride)) ? 'C' : 'F';
}
/* "View.MemoryView":1094
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* Generated from "View.MemoryView":1094 (cdef void _copy_strided_to_strided).
 * Recursively copy `dst_shape`-many elements per dimension from a strided
 * source buffer into a strided destination buffer.  At ndim == 1 the copy
 * is done element by element, or with a single bulk memcpy when both
 * strides equal the item size (i.e. both sides are contiguous).
 * NOTE(review): extents are taken from dst_shape; the caller is expected
 * to have verified src/dst extents match (see _err_extents elsewhere). */
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  Py_ssize_t dst_extent = __pyx_v_dst_shape[0];
  Py_ssize_t src_stride = __pyx_v_src_strides[0];
  Py_ssize_t dst_stride = __pyx_v_dst_strides[0];
  Py_ssize_t k;

  if (__pyx_v_ndim == 1) {
    if (src_stride > 0 && dst_stride > 0 &&
        (size_t)src_stride == __pyx_v_itemsize &&
        (size_t)dst_stride == __pyx_v_itemsize) {
      /* Both sides contiguous: one bulk copy of the whole row. */
      memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize * dst_extent);
    } else {
      /* Strided innermost dimension: copy one item at a time. */
      for (k = 0; k < dst_extent; k++) {
        memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize);
        __pyx_v_src_data += src_stride;
        __pyx_v_dst_data += dst_stride;
      }
    }
  } else {
    /* Outer dimension: recurse into each (ndim-1)-dimensional sub-block. */
    for (k = 0; k < dst_extent; k++) {
      _copy_strided_to_strided(__pyx_v_src_data, __pyx_v_src_strides + 1,
                               __pyx_v_dst_data, __pyx_v_dst_strides + 1,
                               __pyx_v_src_shape + 1, __pyx_v_dst_shape + 1,
                               __pyx_v_ndim - 1, __pyx_v_itemsize);
      __pyx_v_src_data += src_stride;
      __pyx_v_dst_data += dst_stride;
    }
  }
}
/* "View.MemoryView":1124
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* Generated from "View.MemoryView":1124 (cdef void copy_strided_to_strided nogil).
 * Thin wrapper: unpack the two slice structs and hand their data pointers,
 * stride arrays and shape arrays to the recursive element copier. */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides,
                           __pyx_v_dst->data, __pyx_v_dst->strides,
                           __pyx_v_src->shape, __pyx_v_dst->shape,
                           __pyx_v_ndim, __pyx_v_itemsize);
}
/* "View.MemoryView":1131
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
/* Generated from "View.MemoryView":1131 (cdef Py_ssize_t slice_get_size nogil).
 * Size in bytes of the memory occupied by the slice: the item size
 * multiplied by every extent in shape[0..ndim). */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
  Py_ssize_t nbytes = __pyx_v_src->memview->view.itemsize;
  int dim;

  for (dim = 0; dim < __pyx_v_ndim; dim++)
    nbytes *= __pyx_v_src->shape[dim];

  return nbytes;
}
/* "View.MemoryView":1142
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* Generated from "View.MemoryView":1142 (cdef Py_ssize_t fill_contig_strides_array nogil).
 * Fill `strides` with the byte strides of a contiguous array of the given
 * shape, starting from `stride` (normally the item size).  order == 'F'
 * builds Fortran (column-major) strides, anything else builds C
 * (row-major) strides.  Returns the total byte size (final accumulated
 * stride). */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
  int dim;

  if (__pyx_v_order == 'F') {
    /* Fortran order: fastest-varying index first. */
    for (dim = 0; dim < __pyx_v_ndim; dim++) {
      __pyx_v_strides[dim] = __pyx_v_stride;
      __pyx_v_stride *= __pyx_v_shape[dim];
    }
  } else {
    /* C order: fastest-varying index last. */
    for (dim = __pyx_v_ndim - 1; dim >= 0; dim--) {
      __pyx_v_strides[dim] = __pyx_v_stride;
      __pyx_v_stride *= __pyx_v_shape[dim];
    }
  }

  return __pyx_v_stride;
}
/* "View.MemoryView":1163
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* Generated from "View.MemoryView":1163 (cdef void *copy_data_to_temp).
 * Allocate a temporary contiguous buffer (in the given memory `order`),
 * describe it in *tmpslice, and copy the contents of *src into it.
 * Returns the malloc'd buffer, or NULL on error with a Python exception
 * set (the GIL is acquired only on the error path, since the function is
 * declared nogil).  The caller owns the returned buffer and must free it. */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
  int __pyx_v_i;
  void *__pyx_v_result;
  size_t __pyx_v_itemsize;
  size_t __pyx_v_size;
  void *__pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  struct __pyx_memoryview_obj *__pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
  /* result = malloc(size); raise MemoryError on failure */
  __pyx_v_result = malloc(__pyx_v_size);
  __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
  if (__pyx_t_2) {
    __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* Describe the new buffer in *tmpslice: same shape as src, no
   * suboffsets (direct access), contiguous strides in `order`. */
  __pyx_v_tmpslice->data = ((char *)__pyx_v_result);
  __pyx_v_tmpslice->memview = __pyx_v_src->memview;  /* borrowed, no INCREF here */
  /* NOTE(review): the generated code routes this through a temporary
   * (__pyx_t_4); kept byte-identical below. */
  __pyx_t_4 = __pyx_v_src->memview;
  __pyx_v_tmpslice->memview = __pyx_t_4;
  __pyx_t_3 = __pyx_v_ndim;
  for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) {
    __pyx_v_i = __pyx_t_5;
    (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
    (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1;
  }
  __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order);
  /* Broadcast convention: dimensions of extent 1 get stride 0. */
  __pyx_t_3 = __pyx_v_ndim;
  for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) {
    __pyx_v_i = __pyx_t_5;
    __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
    if (__pyx_t_2) {
      (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
      goto __pyx_L8;
    }
    __pyx_L8:;
  }
  /* Bulk memcpy when src is already contiguous in `order`, otherwise a
   * full strided copy. */
  __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0);
  if (__pyx_t_2) {
    memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size);
    goto __pyx_L9;
  }
  /*else*/ {
    copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
  }
  __pyx_L9:;
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;
  /* function exit code: error path must take the GIL to record the
   * traceback, since the function body runs without it. */
  __pyx_L1_error:;
  {
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  }
  __pyx_r = NULL;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":1206
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* Generated from "View.MemoryView":1206 (cdef int _err_extents except -1 with gil).
 * Always raises ValueError("got differing extents in dimension %d (got %d
 * and %d)" % (i, extent1, extent2)) and returns -1.  Acquires the GIL for
 * the duration since it is called from nogil code. */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_extents", 0);
  /* Build the (i, extent1, extent2) argument tuple for %-formatting. */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_3 = 0;
  /* Format the message, wrap it in a 1-tuple, call ValueError, raise. */
  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_Raise(__pyx_t_3, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  /* function exit code: only the error path exists (function always raises). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1212
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* Generated from "View.MemoryView":1212 (cdef int _err_dim except -1 with gil).
 * Always raises error(msg.decode('ascii') % dim) and returns -1.  `msg`
 * must be an ASCII printf-style format string containing one %d.  The GIL
 * is held for the duration since callers run without it. */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_dim", 0);
  __Pyx_INCREF(__pyx_v_error);
  /* msg.decode('ascii') % dim */
  __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Call `error` with the formatted message.  The PyMethod_Check dance is
   * Cython's fast path for calling bound methods without re-packing args. */
  __Pyx_INCREF(__pyx_v_error);
  __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
  if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_2)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
    }
  }
  if (!__pyx_t_2) {
    __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_GOTREF(__pyx_t_1);
  } else {
    __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = NULL;
    PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    __pyx_t_4 = 0;
    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  }
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  /* function exit code: only the error path exists (function always raises). */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1216
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* Generated from View.MemoryView:1216, `_err(object error, char *msg)`.
 * Acquires the GIL, then raises `error(msg.decode('ascii'))` when a
 * message is supplied, or bare `error` otherwise.  Always returns -1
 * with a Python exception set. */
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
  PyObject *text = NULL;
  PyObject *exc = NULL;
  int py_line = 0;  /* View.MemoryView line reported in the traceback */
#ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
  Py_INCREF(__pyx_v_error);
  if (__pyx_v_msg != NULL) {
    /* raise error(msg.decode('ascii')) */
    py_line = 1218;
    text = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII);
    if (unlikely(!text)) goto bad;
    exc = __Pyx_PyObject_CallOneArg(__pyx_v_error, text);
    Py_DECREF(text); text = NULL;
    if (unlikely(!exc)) goto bad;
    __Pyx_Raise(exc, 0, 0, 0);
    Py_DECREF(exc); exc = NULL;
  } else {
    /* raise error */
    py_line = 1220;
    __Pyx_Raise(__pyx_v_error, 0, 0, 0);
  }
bad:
  /* Both branches raise, so control always reaches the error epilogue. */
  Py_XDECREF(text);
  Py_XDECREF(exc);
  __Pyx_AddTraceback("View.MemoryView._err", __LINE__, py_line, __pyx_f[2]);
  Py_DECREF(__pyx_v_error);
#ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
#endif
  return -1;
}
/* "View.MemoryView":1223
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* Generated from View.MemoryView:1223, `memoryview_copy_contents`.
 * Copies the contents of slice `src` into slice `dst`, broadcasting
 * extent-1 source dimensions where needed.  Overlapping memory is
 * handled by first copying `src` into a temporary contiguous buffer;
 * matching C/F-contiguous layouts take a single-memcpy fast path, and
 * everything else falls back to an element-wise strided copy.
 * Returns 0 on success, -1 with a Python exception set on failure.
 *
 * Fix vs. the original generated code: the temporary buffer `tmpdata`
 * was leaked when an error occurred after `copy_data_to_temp` (e.g. a
 * failing `transpose_memslice`); the error exit now frees it. */
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
  void *tmpdata = NULL;  /* scratch buffer used only when src/dst overlap */
  size_t itemsize = __pyx_v_src.memview->view.itemsize;
  char order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
  int broadcasting = 0;
  int direct_copy = 0;
  __Pyx_memviewslice tmp;
  int ndim = (__pyx_v_dst_ndim > __pyx_v_src_ndim) ? __pyx_v_dst_ndim : __pyx_v_src_ndim;
  int i;
  int py_line = 0;  /* View.MemoryView line reported in the traceback */

  /* Pad the lower-dimensional operand with leading extent-1 dimensions. */
  if (__pyx_v_src_ndim < __pyx_v_dst_ndim)
    __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
  else if (__pyx_v_dst_ndim < __pyx_v_src_ndim)
    __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);

  /* Check extents (allowing src to broadcast) and require direct access. */
  for (i = 0; i < ndim; i++) {
    if ((__pyx_v_src.shape[i]) != (__pyx_v_dst.shape[i])) {
      if ((__pyx_v_src.shape[i]) == 1) {
        broadcasting = 1;
        __pyx_v_src.strides[i] = 0;  /* stride 0 repeats src along axis i */
      } else {
        py_line = 1252;
        if (unlikely(__pyx_memoryview_err_extents(i, (__pyx_v_dst.shape[i]), (__pyx_v_src.shape[i])) == -1)) goto fail;
      }
    }
    if ((__pyx_v_src.suboffsets[i]) >= 0) {
      py_line = 1255;
      if (unlikely(__pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Dimension_d_is_not_direct, i) == -1)) goto fail;
    }
  }

  /* Overlapping memory: stage src through a contiguous temporary. */
  if (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), ndim, itemsize)) {
    if (!__pyx_memviewslice_is_contig((&__pyx_v_src), order, ndim))
      order = __pyx_get_best_slice_order((&__pyx_v_dst), ndim);
    py_line = 1262;
    tmpdata = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&tmp), order, ndim);
    if (unlikely(tmpdata == NULL)) goto fail;
    __pyx_v_src = tmp;
  }

  /* Fast path: identical contiguity on both sides allows one memcpy. */
  if (!broadcasting) {
    if (__pyx_memviewslice_is_contig((&__pyx_v_src), 'C', ndim))
      direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'C', ndim);
    else if (__pyx_memviewslice_is_contig((&__pyx_v_src), 'F', ndim))
      direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'F', ndim);
    if (direct_copy) {
      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, ndim, 0);
      memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), ndim));
      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, ndim, 1);
      free(tmpdata);
      return 0;
    }
  }

  /* Both sides iterate best in Fortran order: transpose so the strided
   * copy below walks memory in its preferred (C) order. */
  if ((order == 'F') && ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), ndim))) {
    py_line = 1284;
    if (unlikely(__pyx_memslice_transpose((&__pyx_v_src)) == 0)) goto fail;
    py_line = 1285;
    if (unlikely(__pyx_memslice_transpose((&__pyx_v_dst)) == 0)) goto fail;
  }

  /* General element-wise strided copy. */
  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, ndim, 0);
  copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), ndim, itemsize);
  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, ndim, 1);
  free(tmpdata);
  return 0;

fail:
  /* This function runs nogil; take the GIL to record the traceback. */
  {
#ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
    __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __LINE__, py_line, __pyx_f[2]);
#ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
#endif
  }
  free(tmpdata);  /* bug fix: temp buffer previously leaked on error paths */
  return -1;
}
/* "View.MemoryView":1295
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* Generated from View.MemoryView:1295, `broadcast_leading`.
 * Grows a slice from `ndim` to `ndim_other` dimensions by shifting the
 * existing dimensions to the tail and prepending broadcastable
 * (extent-1, direct) leading dimensions. */
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
  int idx;
  int shift = __pyx_v_ndim_other - __pyx_v_ndim;

  /* Move existing dims back-to-front so no slot is overwritten before
   * it has been read. */
  for (idx = __pyx_v_ndim - 1; idx >= 0; idx--) {
    __pyx_v_mslice->shape[idx + shift] = __pyx_v_mslice->shape[idx];
    __pyx_v_mslice->strides[idx + shift] = __pyx_v_mslice->strides[idx];
    __pyx_v_mslice->suboffsets[idx + shift] = __pyx_v_mslice->suboffsets[idx];
  }

  /* New leading dims: extent 1, stride copied from slot 0, no indirection. */
  for (idx = 0; idx < shift; idx++) {
    __pyx_v_mslice->shape[idx] = 1;
    __pyx_v_mslice->strides[idx] = __pyx_v_mslice->strides[0];
    __pyx_v_mslice->suboffsets[idx] = -1;
  }
}
/* "View.MemoryView":1317
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* Generated from View.MemoryView:1317, `refcount_copying`.
 * Before/after raw byte copies into `dst`: adjust the refcounts of the
 * PyObject* items it holds (`inc` selects INCREF vs DECREF).  No-op for
 * non-object dtypes. */
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
  if (!__pyx_v_dtype_is_object)
    return;
  __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape,
                                                      __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
}
/* "View.MemoryView":1326
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* Generated from View.MemoryView:1326, `refcount_objects_in_slice_with_gil`.
 * GIL-acquiring wrapper around refcount_objects_in_slice, for callers
 * running in nogil context. */
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
#ifdef WITH_THREAD
  PyGILState_STATE gil_state = PyGILState_Ensure();
#endif
  __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides,
                                             __pyx_v_ndim, __pyx_v_inc);
#ifdef WITH_THREAD
  PyGILState_Release(gil_state);
#endif
}
/* "View.MemoryView":1332
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* Generated from View.MemoryView:1332, `refcount_objects_in_slice`.
 * Recursively walks an ndim-dimensional strided slice of PyObject*
 * items, INCREF-ing (inc true) or DECREF-ing (inc false) each one.
 * Caller must hold the GIL. */
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
  Py_ssize_t k;
  for (k = 0; k < __pyx_v_shape[0]; k++, __pyx_v_data += __pyx_v_strides[0]) {
    if (__pyx_v_ndim == 1) {
      /* Innermost dimension: data points at a PyObject* element. */
      PyObject *item = ((PyObject **)__pyx_v_data)[0];
      if (__pyx_v_inc)
        Py_INCREF(item);
      else
        Py_DECREF(item);
    } else {
      /* Recurse into the next dimension. */
      __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape + 1,
                                                 __pyx_v_strides + 1, __pyx_v_ndim - 1,
                                                 __pyx_v_inc);
    }
  }
}
/* "View.MemoryView":1352
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* Generated from View.MemoryView:1352, `slice_assign_scalar`.
 * Fills every element of `dst` with the `itemsize` bytes at `item`.
 * For object dtypes the old items are DECREF'd before the overwrite and
 * the new contents INCREF'd afterwards. */
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
  __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides,
                                        __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* Generated from View.MemoryView:1362, `_slice_assign_scalar`.
 * Recursive worker for slice_assign_scalar: memcpy `item` into each
 * element of the strided ndim-dimensional region starting at `data`. */
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
  Py_ssize_t step = __pyx_v_strides[0];
  Py_ssize_t count = __pyx_v_shape[0];
  Py_ssize_t k;

  if (__pyx_v_ndim == 1) {
    /* Innermost dimension: copy the scalar bytes into each element. */
    for (k = 0; k < count; k++, __pyx_v_data += step)
      memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize);
  } else {
    /* Recurse one dimension down for each index along this axis. */
    for (k = 0; k < count; k++, __pyx_v_data += step)
      __pyx_memoryview__slice_assign_scalar(__pyx_v_data, __pyx_v_shape + 1,
                                            __pyx_v_strides + 1, __pyx_v_ndim - 1,
                                            __pyx_v_itemsize, __pyx_v_item);
  }
}
/* tp_new slot for the Cython `array` type: allocate the instance,
 * initialise its PyObject-typed fields to None, then run __cinit__.
 * Returns NULL (0) on allocation or __cinit__ failure. */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_array_obj *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    /* Abstract subclass path: let object.__new__ raise the proper error. */
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_array_obj *)o);
  /* Fields must be valid objects before __cinit__ may fail. */
  p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
  p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
  if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) {
    Py_DECREF(o); o = 0;
  }
  return o;
}
/* tp_dealloc slot for the Cython `array` type: run __dealloc__ with the
 * object temporarily resurrected, then clear object fields and release
 * the memory. */
static void __pyx_tp_dealloc_array(PyObject *o) {
  struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  /* PEP 442: give tp_finalize a chance first; it may resurrect the object. */
  if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  {
    /* Preserve any pending exception across __dealloc__, and bump the
     * refcount so code inside __dealloc__ cannot re-enter deallocation. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o);
    __pyx_array___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->mode);
  Py_CLEAR(p->_format);
  (*Py_TYPE(o)->tp_free)(o);
}
/* sq_item slot for the Cython `array` type: route integer indexing
 * through the mapping protocol's mp_subscript. */
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
  PyObject *result;
  PyObject *index = PyInt_FromSsize_t(i);
  if (!index)
    return 0;
  result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
  Py_DECREF(index);
  return result;
}
/* mp_ass_subscript slot for the Cython `array` type: stores delegate to
 * __setitem__; subscript deletion is rejected. */
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    /* v == NULL means `del o[i]`, which this type does not support. */
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
  return __pyx_array___setitem__(o, i, v);
}
/* tp_getattro slot for the Cython `array` type: try generic attribute
 * lookup first; on AttributeError fall back to __getattr__. */
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
  PyObject *attr = PyObject_GenericGetAttr(o, n);
  if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) {
    PyErr_Clear();
    attr = __pyx_array___getattr__(o, n);
  }
  return attr;
}
/* Getter for the read-only `memview` property of the `array` type. */
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
  return get_memview(o);
}
/* Method table for the `array` type; __getattr__ needs METH_COEXIST so
 * it is reachable alongside the tp_getattro slot. */
static PyMethodDef __pyx_methods_array[] = {
  {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
  {0, 0, 0, 0}
};
/* Property table for the `array` type: exposes the read-only `memview`. */
static struct PyGetSetDef __pyx_getsets_array[] = {
  {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Sequence protocol for the `array` type: only sq_item is provided
 * (delegating to the mapping subscript); all other slots are empty. */
static PySequenceMethods __pyx_tp_as_sequence_array = {
  0, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_array, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};
/* Mapping protocol for the `array` type: subscript get and set/delete. */
static PyMappingMethods __pyx_tp_as_mapping_array = {
  0, /*mp_length*/
  __pyx_array___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
/* Buffer protocol for the `array` type: new-style bf_getbuffer only.
 * The Python-2-era slots are declared (as 0) only when building for
 * Python 2, where PyBufferProcs still has them. */
static PyBufferProcs __pyx_tp_as_buffer_array = {
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getreadbuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getwritebuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getsegcount*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getcharbuffer*/
  #endif
  __pyx_array_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};
/* Type object for the internal "array" class (the Cython memoryview array).
 * Slot tables defined above supply sequence, mapping and buffer behaviour;
 * the type is not GC-tracked (no Py_TPFLAGS_HAVE_GC). */
static PyTypeObject __pyx_type___pyx_array = {
  PyVarObject_HEAD_INIT(0, 0)
  "_interpolate3d.array", /*tp_name*/
  sizeof(struct __pyx_array_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_array, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #else
  0, /*reserved*/
  #endif
  0, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_array, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_array, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  __pyx_tp_getattro_array, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_array, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
  0, /*tp_doc*/
  0, /*tp_traverse*/
  0, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_array, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_array, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_array, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
/* tp_new slot for the Enum sentinel type: allocate the instance (through
 * object.__new__ for abstract subtypes, tp_alloc otherwise) and initialise
 * its single member to None. */
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
    PyObject *o;
    if (unlikely(t->tp_flags & Py_TPFLAGS_IS_ABSTRACT)) {
        o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
    } else {
        o = (*t->tp_alloc)(t, 0);
    }
    if (unlikely(o == NULL))
        return 0;
    struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
    Py_INCREF(Py_None);
    p->name = Py_None;
    return o;
}
/* tp_dealloc slot for the Enum sentinel type.  On Python >= 3.4 give
 * tp_finalize a chance to run first (the object may be resurrected, in
 * which case deallocation is aborted); then untrack from the GC, drop the
 * single owned reference, and release the memory. */
static void __pyx_tp_dealloc_Enum(PyObject *o) {
  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return; /* resurrected */
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->name);
  (*Py_TYPE(o)->tp_free)(o);
}
/* tp_traverse slot: report the single owned reference (name) to the GC. */
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
    struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
    return p->name ? (*v)(p->name, a) : 0;
}
/* tp_clear slot: break reference cycles.  The member is swapped for None
 * BEFORE the old value is decref'd so reentrant code never observes a
 * dangling pointer during deallocation. */
static int __pyx_tp_clear_Enum(PyObject *o) {
  PyObject* tmp;
  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
  tmp = ((PyObject*)p->name);
  p->name = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}
/* No regular methods on the Enum sentinel type. */
static PyMethodDef __pyx_methods_Enum[] = {
  {0, 0, 0, 0}
};
/* Type object for the internal "Enum" sentinel class used by memoryviews
 * (e.g. the "generic"/"strided"/"indirect" markers).  GC-enabled because
 * it owns a Python reference (name). */
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
  PyVarObject_HEAD_INIT(0, 0)
  "_interpolate3d.Enum", /*tp_name*/
  sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_Enum, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #else
  0, /*reserved*/
  #endif
  __pyx_MemviewEnum___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_Enum, /*tp_traverse*/
  __pyx_tp_clear_Enum, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_Enum, /*tp_methods*/
  0, /*tp_members*/
  0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  __pyx_MemviewEnum___init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_Enum, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
/* tp_new slot for the memoryview type: allocate the instance, install the
 * vtable pointer, default all object members to None (so dealloc is always
 * safe), then run the generated __cinit__.  Returns NULL with an exception
 * set on failure. */
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryview_obj *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    /* abstract subtype: route through object.__new__ */
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryview_obj *)o);
  p->__pyx_vtab = __pyx_vtabptr_memoryview;
  p->obj = Py_None; Py_INCREF(Py_None);
  p->_size = Py_None; Py_INCREF(Py_None);
  p->_array_interface = Py_None; Py_INCREF(Py_None);
  p->view.obj = NULL; /* no buffer acquired yet */
  if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) {
    Py_DECREF(o); o = 0;
  }
  return o;
}
/* tp_dealloc slot for the memoryview type.  Runs tp_finalize first where
 * available (possibly resurrecting the object), untracks from the GC, then
 * invokes the Python-level __dealloc__ with the refcount temporarily bumped
 * so the object stays alive during the call, preserving any pending
 * exception across it.  Finally drops owned references and frees. */
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return; /* resurrected */
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o); /* keep object alive while __dealloc__ runs */
    __pyx_memoryview___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->obj);
  Py_CLEAR(p->_size);
  Py_CLEAR(p->_array_interface);
  (*Py_TYPE(o)->tp_free)(o);
}
/* tp_traverse slot: report every owned Python reference (obj, _size,
 * _array_interface and the buffer's owner view.obj) to the GC visitor,
 * stopping at the first non-zero visitor result. */
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
    struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
    PyObject *members[] = { p->obj, p->_size, p->_array_interface, p->view.obj };
    for (size_t idx = 0; idx < sizeof(members) / sizeof(members[0]); idx++) {
        if (members[idx]) {
            int rc = (*v)(members[idx], a);
            if (rc)
                return rc;
        }
    }
    return 0;
}
/* tp_clear slot: break reference cycles.  Each member is swapped for None
 * BEFORE the old value is decref'd so reentrant code never sees a dangling
 * pointer; the buffer owner (view.obj) is simply cleared. */
static int __pyx_tp_clear_memoryview(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  tmp = ((PyObject*)p->obj);
  p->obj = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_size);
  p->_size = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_array_interface);
  p->_array_interface = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  Py_CLEAR(p->view.obj);
  return 0;
}
/* sq_item slot for the memoryview type: box the Py_ssize_t index into a
 * Python integer and forward the lookup to mp_subscript. */
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
    PyObject *index = PyInt_FromSsize_t(i);
    if (index == NULL)
        return NULL;
    PyObject *result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
    Py_DECREF(index);
    return result;
}
/* mp_ass_subscript slot for the memoryview type.  Assignment delegates to
 * the generated __setitem__; deletion (v == NULL) raises
 * NotImplementedError. */
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
    if (v == NULL) {
        PyErr_Format(PyExc_NotImplementedError,
                     "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
        return -1;
    }
    return __pyx_memoryview___setitem__(o, i, v);
}
/* Thin property getters for the memoryview type; each forwards to the
 * corresponding generated accessor. */
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_transpose(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview__get__base(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_shape(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_strides(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_suboffsets(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_ndim(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_itemsize(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_nbytes(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_size(o);
}
/* Method table for the memoryview type. */
static PyMethodDef __pyx_methods_memoryview[] = {
  {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
  {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
  {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
  {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
  {0, 0, 0, 0}
};
/* Property table for the memoryview type (read-only getters). */
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
  {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, 0, 0},
  {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, 0, 0},
  {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, 0, 0},
  {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, 0, 0},
  {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, 0, 0},
  {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, 0, 0},
  {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, 0, 0},
  {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, 0, 0},
  {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Sequence protocol for the memoryview type: length plus indexed access
 * (item access delegates to the mapping subscript). */
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
  __pyx_memoryview___len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_memoryview, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};
/* Mapping protocol for the memoryview type. */
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
  __pyx_memoryview___len__, /*mp_length*/
  __pyx_memoryview___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
/* Buffer protocol for the memoryview type.  Legacy Python 2 buffer slots
 * are unused; only the new-style getbuffer is implemented. */
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getreadbuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getwritebuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getsegcount*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getcharbuffer*/
  #endif
  __pyx_memoryview_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};
/* Type object for the internal "memoryview" class.  GC-enabled (it owns
 * Python references) and exposes the buffer protocol; sequence/mapping
 * behaviour comes from the slot tables above. */
static PyTypeObject __pyx_type___pyx_memoryview = {
  PyVarObject_HEAD_INIT(0, 0)
  "_interpolate3d.memoryview", /*tp_name*/
  sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_memoryview, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #else
  0, /*reserved*/
  #endif
  __pyx_memoryview___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  __pyx_memoryview___str__, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_memoryview, /*tp_traverse*/
  __pyx_tp_clear_memoryview, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_memoryview, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_memoryview, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_memoryview, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
/* tp_new slot for the _memoryviewslice subtype: delegate allocation and
 * base initialisation to the memoryview tp_new, then install the subtype
 * vtable and default the subtype-specific members. */
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
    PyObject *o = __pyx_tp_new_memoryview(t, a, k);
    if (unlikely(o == NULL))
        return 0;
    struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
    p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
    Py_INCREF(Py_None);
    p->from_object = Py_None;
    p->from_slice.memview = NULL;
    return o;
}
/* tp_dealloc slot for the _memoryviewslice subtype.  Same finalize /
 * untrack / __dealloc__-with-bumped-refcount dance as the base type, then
 * drops the subtype's own reference and re-tracks the object before
 * chaining to the base dealloc (which untracks again itself). */
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return; /* resurrected */
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o); /* keep object alive while __dealloc__ runs */
    __pyx_memoryviewslice___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->from_object);
  PyObject_GC_Track(o); /* base dealloc expects a tracked object */
  __pyx_tp_dealloc_memoryview(o);
}
/* tp_traverse slot: visit the base memoryview's references first, then
 * the subtype's own from_object member. */
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
    struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
    int rc = __pyx_tp_traverse_memoryview(o, v, a);
    if (rc == 0 && p->from_object)
        rc = (*v)(p->from_object, a);
    return rc;
}
/* tp_clear slot: clear the base memoryview's references, then swap
 * from_object for None before decrefing the old value, and release the
 * stored slice's memoryview reference. */
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  __pyx_tp_clear_memoryview(o);
  tmp = ((PyObject*)p->from_object);
  p->from_object = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  __PYX_XDEC_MEMVIEW(&p->from_slice, 1);
  return 0;
}
/* Getter for the read-only "base" property of the slice subtype. */
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryviewslice__get__base(o);
}
/* No regular methods on the slice subtype. */
static PyMethodDef __pyx_methods__memoryviewslice[] = {
  {0, 0, 0, 0}
};
/* Property table for the slice subtype (overrides "base"). */
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
  {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Type object for the internal "_memoryviewslice" subtype of memoryview.
 * Under PyPy the repr/str slots must be restated explicitly; under CPython
 * they are inherited from the base type at PyType_Ready time. */
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
  PyVarObject_HEAD_INIT(0, 0)
  "_interpolate3d._memoryviewslice", /*tp_name*/
  sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #else
  0, /*reserved*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___repr__, /*tp_repr*/
  #else
  0, /*tp_repr*/
  #endif
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___str__, /*tp_str*/
  #else
  0, /*tp_str*/
  #endif
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  "Internal class for passing memoryview slices to Python", /*tp_doc*/
  __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
  __pyx_tp_clear__memoryviewslice, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods__memoryviewslice, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets__memoryviewslice, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new__memoryviewslice, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
/* Module-level method table: empty here — module functions are added
 * during module initialisation. */
static PyMethodDef __pyx_methods[] = {
  {0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
/* Python 3 module definition for "_interpolate3d" (m_size = -1: module
 * keeps state in globals, does not support sub-interpreters). */
static struct PyModuleDef __pyx_moduledef = {
#if PY_VERSION_HEX < 0x03020000
  { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
#else
  PyModuleDef_HEAD_INIT,
#endif
  "_interpolate3d",
  0, /* m_doc */
  -1, /* m_size */
  __pyx_methods /* m_methods */,
  NULL, /* m_reload */
  NULL, /* m_traverse */
  NULL, /* m_clear */
  NULL /* m_free */
};
#endif
/* Interned-string table consumed during module init to create all Python
 * string constants in one pass.  Each entry pairs a global PyObject* slot
 * with the C literal and its size; the trailing integer fields appear to
 * be encoding/is_unicode/is_str/intern flags per Cython's
 * __Pyx_StringTabEntry layout — confirm against the generated utility
 * code.  NULL entry terminates the table. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
  {&__pyx_kp_s_, __pyx_k_, sizeof(__pyx_k_), 0, 0, 1, 0},
  {&__pyx_n_s_AttributeError, __pyx_k_AttributeError, sizeof(__pyx_k_AttributeError), 0, 0, 1, 1},
  {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
  {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
  {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
  {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
  {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
  {&__pyx_kp_s_Expected_at_least_d_arguments, __pyx_k_Expected_at_least_d_arguments, sizeof(__pyx_k_Expected_at_least_d_arguments), 0, 0, 1, 0},
  {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
  {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
  {&__pyx_kp_s_Function_call_with_ambiguous_arg, __pyx_k_Function_call_with_ambiguous_arg, sizeof(__pyx_k_Function_call_with_ambiguous_arg), 0, 0, 1, 0},
  {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
  {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
  {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
  {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
  {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
  {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
  {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
  {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
  {&__pyx_kp_s_No_matching_signature_found, __pyx_k_No_matching_signature_found, sizeof(__pyx_k_No_matching_signature_found), 0, 0, 1, 0},
  {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
  {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
  {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
  {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
  {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
  {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
  {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
  {&__pyx_kp_s__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 0, 1, 0},
  {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
  {&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1},
  {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
  {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
  {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
  {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
  {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
  {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
  {&__pyx_n_s_defaults, __pyx_k_defaults, sizeof(__pyx_k_defaults), 0, 0, 1, 1},
  {&__pyx_n_s_double, __pyx_k_double, sizeof(__pyx_k_double), 0, 0, 1, 1},
  {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
  {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
  {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
  {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
  {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
  {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1},
  {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
  {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
  {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
  {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
  {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
  {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
  {&__pyx_n_s_interpolate3d, __pyx_k_interpolate3d, sizeof(__pyx_k_interpolate3d), 0, 0, 1, 1},
  {&__pyx_n_s_interpolate3d_2, __pyx_k_interpolate3d_2, sizeof(__pyx_k_interpolate3d_2), 0, 0, 1, 1},
  {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
  {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
  {&__pyx_n_s_kind, __pyx_k_kind, sizeof(__pyx_k_kind), 0, 0, 1, 1},
  {&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1},
  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
  {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
  {&__pyx_n_s_mid_ind, __pyx_k_mid_ind, sizeof(__pyx_k_mid_ind), 0, 0, 1, 1},
  {&__pyx_kp_s_mnt_pact_ds381_seren3_src_analy, __pyx_k_mnt_pact_ds381_seren3_src_analy, sizeof(__pyx_k_mnt_pact_ds381_seren3_src_analy), 0, 0, 1, 0},
  {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
  {&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1},
  {&__pyx_n_s_n_x_vals, __pyx_k_n_x_vals, sizeof(__pyx_k_n_x_vals), 0, 0, 1, 1},
  {&__pyx_n_s_n_y_vals, __pyx_k_n_y_vals, sizeof(__pyx_k_n_y_vals), 0, 0, 1, 1},
  {&__pyx_n_s_n_z_vals, __pyx_k_n_z_vals, sizeof(__pyx_k_n_z_vals), 0, 0, 1, 1},
  {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
  {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
  {&__pyx_n_s_ndarray, __pyx_k_ndarray, sizeof(__pyx_k_ndarray), 0, 0, 1, 1},
  {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
  {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
  {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
  {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
  {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
  {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
  {&__pyx_n_s_ord, __pyx_k_ord, sizeof(__pyx_k_ord), 0, 0, 1, 1},
  {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
  {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
  {&__pyx_n_s_result_array, __pyx_k_result_array, sizeof(__pyx_k_result_array), 0, 0, 1, 1},
  {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
  {&__pyx_n_s_signatures, __pyx_k_signatures, sizeof(__pyx_k_signatures), 0, 0, 1, 1},
  {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
  {&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1},
  {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
  {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
  {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
  {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
  {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
  {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
  {&__pyx_n_s_strip, __pyx_k_strip, sizeof(__pyx_k_strip), 0, 0, 1, 1},
  {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
  {&__pyx_n_s_sys, __pyx_k_sys, sizeof(__pyx_k_sys), 0, 0, 1, 1},
  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
  {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
  {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
  {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
  {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
  {&__pyx_n_s_v0, __pyx_k_v0, sizeof(__pyx_k_v0), 0, 0, 1, 1},
  {&__pyx_n_s_v00, __pyx_k_v00, sizeof(__pyx_k_v00), 0, 0, 1, 1},
  {&__pyx_n_s_v000, __pyx_k_v000, sizeof(__pyx_k_v000), 0, 0, 1, 1},
  {&__pyx_n_s_v001, __pyx_k_v001, sizeof(__pyx_k_v001), 0, 0, 1, 1},
  {&__pyx_n_s_v01, __pyx_k_v01, sizeof(__pyx_k_v01), 0, 0, 1, 1},
  {&__pyx_n_s_v010, __pyx_k_v010, sizeof(__pyx_k_v010), 0, 0, 1, 1},
  {&__pyx_n_s_v011, __pyx_k_v011, sizeof(__pyx_k_v011), 0, 0, 1, 1},
  {&__pyx_n_s_v1, __pyx_k_v1, sizeof(__pyx_k_v1), 0, 0, 1, 1},
  {&__pyx_n_s_v10, __pyx_k_v10, sizeof(__pyx_k_v10), 0, 0, 1, 1},
  {&__pyx_n_s_v100, __pyx_k_v100, sizeof(__pyx_k_v100), 0, 0, 1, 1},
  {&__pyx_n_s_v101, __pyx_k_v101, sizeof(__pyx_k_v101), 0, 0, 1, 1},
  {&__pyx_n_s_v11, __pyx_k_v11, sizeof(__pyx_k_v11), 0, 0, 1, 1},
  {&__pyx_n_s_v110, __pyx_k_v110, sizeof(__pyx_k_v110), 0, 0, 1, 1},
  {&__pyx_n_s_v111, __pyx_k_v111, sizeof(__pyx_k_v111), 0, 0, 1, 1},
  {&__pyx_n_s_vals, __pyx_k_vals, sizeof(__pyx_k_vals), 0, 0, 1, 1},
  {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1},
  {&__pyx_n_s_x_bot_ind, __pyx_k_x_bot_ind, sizeof(__pyx_k_x_bot_ind), 0, 0, 1, 1},
  {&__pyx_n_s_x_fac, __pyx_k_x_fac, sizeof(__pyx_k_x_fac), 0, 0, 1, 1},
  {&__pyx_n_s_x_top_ind, __pyx_k_x_top_ind, sizeof(__pyx_k_x_top_ind), 0, 0, 1, 1},
  {&__pyx_n_s_x_vals, __pyx_k_x_vals, sizeof(__pyx_k_x_vals), 0, 0, 1, 1},
  {&__pyx_n_s_xi, __pyx_k_xi, sizeof(__pyx_k_xi), 0, 0, 1, 1},
  {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1},
  {&__pyx_n_s_y_bot_ind, __pyx_k_y_bot_ind, sizeof(__pyx_k_y_bot_ind), 0, 0, 1, 1},
  {&__pyx_n_s_y_fac, __pyx_k_y_fac, sizeof(__pyx_k_y_fac), 0, 0, 1, 1},
  {&__pyx_n_s_y_top_ind, __pyx_k_y_top_ind, sizeof(__pyx_k_y_top_ind), 0, 0, 1, 1},
  {&__pyx_n_s_y_vals, __pyx_k_y_vals, sizeof(__pyx_k_y_vals), 0, 0, 1, 1},
  {&__pyx_n_s_yi, __pyx_k_yi, sizeof(__pyx_k_yi), 0, 0, 1, 1},
  {&__pyx_n_s_z, __pyx_k_z, sizeof(__pyx_k_z), 0, 0, 1, 1},
  {&__pyx_n_s_z_bot_ind, __pyx_k_z_bot_ind, sizeof(__pyx_k_z_bot_ind), 0, 0, 1, 1},
  {&__pyx_n_s_z_fac, __pyx_k_z_fac, sizeof(__pyx_k_z_fac), 0, 0, 1, 1},
  {&__pyx_n_s_z_top_ind, __pyx_k_z_top_ind, sizeof(__pyx_k_z_top_ind), 0, 0, 1, 1},
  {&__pyx_n_s_z_vals, __pyx_k_z_vals, sizeof(__pyx_k_z_vals), 0, 0, 1, 1},
  {&__pyx_n_s_zi, __pyx_k_zi, sizeof(__pyx_k_zi), 0, 0, 1, 1},
  {&__pyx_n_s_zip, __pyx_k_zip, sizeof(__pyx_k_zip), 0, 0, 1, 1},
  {0, 0, 0, 0, 0, 0, 0}
};
/* Look up and cache every builtin the module references (exceptions,
 * range, zip, ...).  Returns 0 on success, -1 with an exception set on
 * failure; the __pyx_lineno/__pyx_filename bookkeeping records where in
 * the original .pyx/.pxd source each builtin is first used. */
static int __Pyx_InitCachedBuiltins(void) {
  __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_AttributeError = __Pyx_GetBuiltinName(__pyx_n_s_AttributeError); if (!__pyx_builtin_AttributeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_ord = __Pyx_GetBuiltinName(__pyx_n_s_ord); if (!__pyx_builtin_ord) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_zip = __Pyx_GetBuiltinName(__pyx_n_s_zip); if (!__pyx_builtin_zip) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 231; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  return 0;
  __pyx_L1_error:;
  return -1;
}
/* Create and cache this module's constant Python objects: the one-element
 * argument tuples used when raising exceptions, three cached slice(None)
 * objects, and the varnames tuple plus code object backing interpolate3d().
 * Called once from module init; returns 0 on success, -1 with a Python
 * error set (the __pyx_filename/__pyx_lineno bookkeeping records where). */
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "_interpolate3d.pyx":14
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def interpolate3d(int n, # <<<<<<<<<<<<<<
* np.ndarray[floating,ndim=1] x,
* np.ndarray[floating,ndim=1] y,
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s__3); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_No_matching_signature_found); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Function_call_with_ambiguous_arg); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":218
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":222
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":260
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":802
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":806
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "../../../../lustre/scratch/astro/ds381/yt-x86_64/lib/python2.7/site-packages/Cython-0.22-py2.7-linux-x86_64.egg/Cython/Includes/numpy/__init__.pxd":826
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":127
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":130
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if isinstance(format, unicode):
*/
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "View.MemoryView":142
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "View.MemoryView":170
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__16)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "View.MemoryView":186
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "View.MemoryView":445
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__18)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "View.MemoryView":521
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "View.MemoryView":529
* def __get__(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
/* Cached (-1,) tuple for the no-suboffsets case; built element-wise with
 * PyTuple_New/SET_ITEM rather than PyTuple_Pack. */
__pyx_tuple__20 = PyTuple_New(1); if (unlikely(!__pyx_tuple__20)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_INCREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__20, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "View.MemoryView":638
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__21 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__21)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_slice__21);
__Pyx_GIVEREF(__pyx_slice__21);
/* "View.MemoryView":641
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
__pyx_slice__22 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__22)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 641; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_slice__22);
__Pyx_GIVEREF(__pyx_slice__22);
/* "View.MemoryView":652
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_slice__23 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__23)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_slice__23);
__Pyx_GIVEREF(__pyx_slice__23);
/* "View.MemoryView":659
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__24)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__24);
__Pyx_GIVEREF(__pyx_tuple__24);
/* "_interpolate3d.pyx":14
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def interpolate3d(int n, # <<<<<<<<<<<<<<
* np.ndarray[floating,ndim=1] x,
* np.ndarray[floating,ndim=1] y,
*/
/* Varnames tuple (40 local names) and the code object for interpolate3d();
 * the code object is shared by all fused-type specializations of the
 * function registered later in module init. */
__pyx_tuple__25 = PyTuple_Pack(40, __pyx_n_s_n, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_n_x_vals, __pyx_n_s_x_vals, __pyx_n_s_n_y_vals, __pyx_n_s_y_vals, __pyx_n_s_n_z_vals, __pyx_n_s_z_vals, __pyx_n_s_vals, __pyx_n_s_result_array, __pyx_n_s_x_top_ind, __pyx_n_s_x_bot_ind, __pyx_n_s_y_top_ind, __pyx_n_s_y_bot_ind, __pyx_n_s_z_top_ind, __pyx_n_s_z_bot_ind, __pyx_n_s_mid_ind, __pyx_n_s_x_fac, __pyx_n_s_y_fac, __pyx_n_s_z_fac, __pyx_n_s_v0, __pyx_n_s_v1, __pyx_n_s_v00, __pyx_n_s_v01, __pyx_n_s_v10, __pyx_n_s_v11, __pyx_n_s_v000, __pyx_n_s_v001, __pyx_n_s_v010, __pyx_n_s_v011, __pyx_n_s_v100, __pyx_n_s_v101, __pyx_n_s_v110, __pyx_n_s_v111, __pyx_n_s_xi, __pyx_n_s_yi, __pyx_n_s_zi, __pyx_n_s_i); if (unlikely(!__pyx_tuple__25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
__pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(12, 0, 40, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_mnt_pact_ds381_seren3_src_analy, __pyx_n_s_interpolate3d, 14, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* Single-argument tuples used to construct the memoryview Enum sentinels
 * (generic/strided/indirect/contiguous) during module init. */
/* "View.MemoryView":276
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__27)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__27);
__Pyx_GIVEREF(__pyx_tuple__27);
/* "View.MemoryView":277
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__28)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__28);
__Pyx_GIVEREF(__pyx_tuple__28);
/* "View.MemoryView":278
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__29)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__29);
__Pyx_GIVEREF(__pyx_tuple__29);
/* "View.MemoryView":281
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__30)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__30);
__Pyx_GIVEREF(__pyx_tuple__30);
/* "View.MemoryView":282
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__31)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__31);
__Pyx_GIVEREF(__pyx_tuple__31);
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/* One-time global setup: initialise Python threading, intern the module's
 * string-constant table, and create the shared small-int objects used by
 * the generated code.  Returns 0 on success, -1 with a Python error set.
 * NOTE(review): PyInt_FromLong is presumably a Cython compat alias for
 * PyLong_FromLong on Python 3 — defined elsewhere in this file; confirm. */
static int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
return 0;
__pyx_L1_error:;
return -1;
}
/* Module initialisation entry point (init_interpolate3d on Python 2,
 * PyInit__interpolate3d on Python 3), emitted by Cython 0.22.  Order
 * matters: runtime checks, module creation, cached builtins/constants,
 * memoryview helper types and vtables, numpy C-type imports, then the
 * module-level statements of _interpolate3d.pyx (imports, registering the
 * fused-type dispatcher for interpolate3d, memoryview Enum sentinels). */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC init_interpolate3d(void); /*proto*/
PyMODINIT_FUNC init_interpolate3d(void)
#else
PyMODINIT_FUNC PyInit__interpolate3d(void); /*proto*/
PyMODINIT_FUNC PyInit__interpolate3d(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
/* Refnanny is a Cython debug aid; missing modules are tolerated, but a
 * present-yet-broken API pointer is fatal. */
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__interpolate3d(void)", 0);
if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#ifdef __Pyx_CyFunction_USED
if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("_interpolate3d", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
/*--- Initialize various global constants etc. ---*/
if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
if (__pyx_module_is_main__interpolate3d) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!PyDict_GetItemString(modules, "_interpolate3d")) {
if (unlikely(PyDict_SetItemString(modules, "_interpolate3d", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
#endif
/*--- Builtin init code ---*/
if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Constants init code ---*/
if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Global init code ---*/
/* Memoryview Enum sentinels start as None; replaced by real Enum objects
 * in the execution code below. */
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
if (PyType_Ready(&__pyx_type___pyx_array) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_array.tp_print = 0;
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 269; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_MemviewEnum.tp_print = 0;
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
/* Wire the memoryview vtable before PyType_Ready so methods are resolvable. */
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_memoryview.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
/* _memoryviewslice subclasses memoryview, overriding the two item
 * conversion slots in a copy of the base vtable. */
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 921; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_memoryviewslice.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 921; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
/*--- Type import code ---*/
/* Import extension types from builtins/numpy, checking struct sizes
 * against the ABI this module was compiled for. */
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type",
#if CYTHON_COMPILING_IN_PYPY
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 864; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
/* "_interpolate3d.pyx":3
* cimport numpy as np
* cimport cython
* import numpy as np # <<<<<<<<<<<<<<
* import sys
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "_interpolate3d.pyx":4
* cimport cython
* import numpy as np
* import sys # <<<<<<<<<<<<<<
*
* from cython cimport floating
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_sys, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_sys, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "_interpolate3d.pyx":14
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def interpolate3d(int n, # <<<<<<<<<<<<<<
* np.ndarray[floating,ndim=1] x,
* np.ndarray[floating,ndim=1] y,
*/
/* Build the fused-function dispatcher: a {"float": impl, "double": impl}
 * signature dict wrapped by the dispatching FusedFunction object that is
 * published in the module dict as "interpolate3d". */
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __pyx_FusedFunction_NewEx(&__pyx_fuse_0__pyx_mdef_14_interpolate3d_3interpolate3d, 0, __pyx_n_s_interpolate3d, NULL, __pyx_n_s_interpolate3d_2, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_float, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __pyx_FusedFunction_NewEx(&__pyx_fuse_1__pyx_mdef_14_interpolate3d_5interpolate3d, 0, __pyx_n_s_interpolate3d, NULL, __pyx_n_s_interpolate3d_2, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_double, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __pyx_FusedFunction_NewEx(&__pyx_mdef_14_interpolate3d_1interpolate3d, 0, __pyx_n_s_interpolate3d, NULL, __pyx_n_s_interpolate3d_2, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_empty_tuple);
((__pyx_FusedFunctionObject *) __pyx_t_2)->__signatures__ = __pyx_t_1;
__Pyx_GIVEREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_interpolate3d, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "_interpolate3d.pyx":1
* cimport numpy as np # <<<<<<<<<<<<<<
* cimport cython
* import numpy as np
*/
__pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":203
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":276
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":277
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":278
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":281
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":282
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":496
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":952
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "__pyxutil":2
*
* cdef extern from *: # <<<<<<<<<<<<<<
* void __pyx_PyErr_Clear "PyErr_Clear" ()
* __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(object)
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
/* Error path: if the module object exists, chain a traceback and drop it;
 * otherwise make sure an ImportError is set for the importer. */
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init _interpolate3d", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init _interpolate3d");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* --- Runtime support code --- */
#if CYTHON_REFNANNY
/* Import the "RefNannyAPI" attribute from module `modname` and return it
 * as a pointer to the reference-count debugging API table.
 * Returns NULL on any failure (import error, missing attribute, or a
 * value that is not convertible to a void pointer). */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    PyObject *m = NULL, *p = NULL;
    void *r = NULL;
    m = PyImport_ImportModule((char *)modname);
    if (!m) goto end;
    p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
    if (!p) goto end;
    r = PyLong_AsVoidPtr(p);
end:
    /* Both temporaries are released whether or not the lookup succeeded. */
    Py_XDECREF(p);
    Py_XDECREF(m);
    return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* Look up `name` on the builtins module (__pyx_b).  On failure, raise a
 * NameError with CPython's standard "name '...' is not defined" wording
 * and return NULL; on success return a new reference. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
    if (unlikely(!result)) {
        PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
            /* %U formats a unicode object directly (Py3 names are unicode). */
            "name '%U' is not defined", name);
#else
            "name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
    }
    return result;
}
/* Raise a TypeError for a call with the wrong number of positional
 * arguments, mirroring CPython's message format.
 * `exact` forces the wording "exactly"; otherwise "at least"/"at most"
 * is chosen from whether too few or too many arguments were found. */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    Py_ssize_t num_expected;
    const char *more_or_less;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    } else {
        num_expected = num_max;
        more_or_less = "at most";
    }
    if (exact) {
        more_or_less = "exactly";
    }
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ? "" : "s", num_found);
}
/* Raise a TypeError reporting that keyword `kw_name` was passed both
 * positionally and by keyword (or by keyword twice). */
static void __Pyx_RaiseDoubleKeywordsError(
    const char* func_name,
    PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
        "%s() got multiple values for keyword argument '%s'", func_name,
        PyString_AsString(kw_name));
#endif
}
/* Match the keyword arguments in dict `kwds` against the NULL-terminated
 * array of interned argument names `argnames`.
 *
 * Matched values are stored into `values[]` at the index of the matching
 * name.  Names at positions below `num_pos_args` were already filled
 * positionally, so matching one of them is a duplicate-argument error.
 * Unknown keywords go into `kwds2` if given (i.e. the function accepts
 * **kwargs), otherwise they raise TypeError.
 *
 * Returns 0 on success, -1 with an exception set on error.
 * NOTE: values stored in `values[]` are borrowed references. */
static int __Pyx_ParseOptionalKeywords(
    PyObject *kwds,
    PyObject **argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject *key = 0, *value = 0;
    Py_ssize_t pos = 0;
    PyObject*** name;
    PyObject*** first_kw_arg = argnames + num_pos_args;
    while (PyDict_Next(kwds, &pos, &key, &value)) {
        /* Fast path: argument names are interned, so pointer comparison
         * usually succeeds without a string compare. */
        name = first_kw_arg;
        while (*name && (**name != key)) name++;
        if (*name) {
            values[name-argnames] = value;
            continue;
        }
        /* Slow path: compare by value (key may not be interned). */
        name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
        if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
            while (*name) {
                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
                        && _PyString_Eq(**name, key)) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* Not a keyword-only match: check the positional names to
                 * distinguish "duplicate argument" from "unknown keyword". */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    if ((**argname == key) || (
                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
                             && _PyString_Eq(**argname, key))) {
                        goto arg_passed_twice;
                    }
                    argname++;
                }
            }
        } else
#endif
        if (likely(PyUnicode_Check(key))) {
            while (*name) {
                int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                    /* Cheap length check before the full comparison. */
                    (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
                    PyUnicode_Compare(**name, key);
                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                if (cmp == 0) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                        (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
                        PyUnicode_Compare(**argname, key);
                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                    if (cmp == 0) goto arg_passed_twice;
                    argname++;
                }
            }
        } else
            goto invalid_keyword_type;
        if (kwds2) {
            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
        } else {
            goto invalid_keyword;
        }
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
invalid_keyword_type:
    PyErr_Format(PyExc_TypeError,
        "%.200s() keywords must be strings", function_name);
    goto bad;
invalid_keyword:
    PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
        "%.200s() got an unexpected keyword argument '%.200s'",
        function_name, PyString_AsString(key));
#else
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
#endif
bad:
    return -1;
}
/* Save the current *handled* exception state (sys.exc_info, not the
 * in-flight exception) into *type/*value/*tb, returning new references
 * (possibly NULL).  Pairs with __Pyx_ExceptionReset. */
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    /* Read the thread state's exc_* slots directly for speed. */
    PyThreadState *tstate = PyThreadState_GET();
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
#else
    PyErr_GetExcInfo(type, value, tb);
#endif
}
/* Restore a previously saved handled-exception state (see
 * __Pyx_ExceptionSave).  Steals the references to type/value/tb and
 * releases whatever was stored in the thread state before. */
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyThreadState *tstate = PyThreadState_GET();
    /* Swap before decref so the slots never hold a freed object. */
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(type, value, tb);
#endif
}
/* Fetch and normalize the currently raised exception, make it the
 * "handled" exception (as an `except` clause entry does), and hand the
 * caller new references in *type/*value/*tb.
 * Returns 0 on success; on failure returns -1 with outputs zeroed. */
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *local_type, *local_value, *local_tb;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyThreadState *tstate = PyThreadState_GET();
    /* Equivalent of PyErr_Fetch done inline on the thread state. */
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_COMPILING_IN_CPYTHON
    /* Normalization itself may have raised. */
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;
#if PY_MAJOR_VERSION >= 3
    if (local_tb) {
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
#endif
    /* Extra increfs: one set of references goes to the caller, the
     * original set is stored as the handled exception below. */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_COMPILING_IN_CPYTHON
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}
#if CYTHON_COMPILING_IN_CPYTHON
/* Fast replacement for PyObject_Call: invokes tp_call directly with
 * recursion checking, falling back to PyObject_Call when tp_call is
 * missing so the standard "not callable" error is produced. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *result;
    ternaryfunc call = func->ob_type->tp_call;
    if (unlikely(!call))
        return PyObject_Call(func, arg, kw);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = (*call)(func, arg, kw);
    Py_LeaveRecursiveCall();
    /* Guard against buggy callables that return NULL without raising. */
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* Inline equivalent of PyErr_Restore: set the in-flight exception from
 * type/value/tb (references are stolen), releasing any previous one. */
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyThreadState *tstate = PyThreadState_GET();
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_Restore(type, value, tb);
#endif
}
/* Inline equivalent of PyErr_Fetch: move the in-flight exception into
 * *type/*value/*tb (ownership transfers to the caller) and clear it. */
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyThreadState *tstate = PyThreadState_GET();
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(type, value, tb);
#endif
}
#if PY_MAJOR_VERSION < 3
/* Implement the Python 2 `raise type, value, tb` semantics: normalize
 * class/instance combinations, validate the traceback argument, and set
 * the resulting exception as the in-flight exception.  `cause` is
 * unused on Python 2. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
    if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
        /* PyPy's PyErr_NormalizeException does not accept a NULL value. */
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* Raising an instance: its class becomes the exception type. */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
    }
    __Pyx_ErrRestore(type, value, tb);
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else
/* Implement the Python 3 `raise type(value) from cause` semantics,
 * including instantiating exception classes, validating the cause, and
 * attaching the explicit traceback if one was given. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            /* `raise Base(...)` with a Derived instance: keep the more
             * specific class if value's class is a subclass of type. */
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                if (PyObject_IsSubclass(instance_class, type)) {
                    type = instance_class;
                } else {
                    instance_class = NULL;
                }
            }
        }
        if (!instance_class) {
            /* Instantiate the exception class with value as argument(s). */
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
#if PY_VERSION_HEX >= 0x03030000
    if (cause) {
#else
    if (cause && cause != Py_None) {
#endif
        PyObject *fixed_cause;
        if (cause == Py_None) {
            /* `raise ... from None` suppresses context chaining. */
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        PyException_SetCause(value, fixed_cause);
    }
    PyErr_SetObject(type, value);
    if (tb) {
#if CYTHON_COMPILING_IN_PYPY
        PyObject *tmp_type, *tmp_value, *tmp_tb;
        /* BUG FIX: PyErr_Fetch takes PyObject** output parameters; the
         * previous code passed the (uninitialized) pointers by value. */
        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
        Py_INCREF(tb);
        PyErr_Restore(tmp_type, tmp_value, tb);
        Py_XDECREF(tmp_tb);
#else
        /* Replace the traceback slot directly on CPython. */
        PyThreadState *tstate = PyThreadState_GET();
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
#endif
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* Generic o[j] = v via PyObject_SetItem.  `j` is a boxed index created
 * by the caller; it is consumed here (decref), and a NULL `j` (boxing
 * failed) propagates as -1. */
static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
    int r;
    if (!j) return -1;
    r = PyObject_SetItem(o, j, v);
    Py_DECREF(j);
    return r;
}
/* Optimized o[i] = v for a C integer index.
 * `is_list` asserts o is a list; `wraparound` enables negative-index
 * handling; `boundscheck` enables range checking for the list fast path.
 * Returns negative on error (note: the list fast path returns 1 on
 * success, sq_ass_item conventionally returns 0 — callers only test <0). */
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
                                               int is_list, int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (is_list || PyList_CheckExact(o)) {
        /* Direct slot assignment; incref v first, then drop the old item. */
        Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o));
        if ((!boundscheck) || likely((n >= 0) & (n < PyList_GET_SIZE(o)))) {
            PyObject* old = PyList_GET_ITEM(o, n);
            Py_INCREF(v);
            PyList_SET_ITEM(o, n, v);
            Py_DECREF(old);
            return 1;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_ass_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* Huge sequences overflow sq_length; fall through and
                     * let sq_ass_item handle the negative index. */
                    if (PyErr_ExceptionMatches(PyExc_OverflowError))
                        PyErr_Clear();
                    else
                        return -1;
                }
            }
            return m->sq_ass_item(o, i, v);
        }
    }
#else
#if CYTHON_COMPILING_IN_PYPY
    if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) {
#else
    if (is_list || PySequence_Check(o)) {
#endif
        return PySequence_SetItem(o, i, v);
    }
#endif
    /* Fall back to boxing the index and using the generic protocol. */
    return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v);
}
/* Called after an iterator returned NULL: swallow StopIteration (normal
 * loop termination, return 0) but report any other exception (-1). */
static CYTHON_INLINE int __Pyx_IterFinish(void) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyThreadState *tstate = PyThreadState_GET();
    PyObject* exc_type = tstate->curexc_type;
    if (unlikely(exc_type)) {
        if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) {
            /* Clear the exception slots manually (inline PyErr_Clear). */
            PyObject *exc_value, *exc_tb;
            exc_value = tstate->curexc_value;
            exc_tb = tstate->curexc_traceback;
            tstate->curexc_type = 0;
            tstate->curexc_value = 0;
            tstate->curexc_traceback = 0;
            Py_DECREF(exc_type);
            Py_XDECREF(exc_value);
            Py_XDECREF(exc_tb);
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#else
    if (unlikely(PyErr_Occurred())) {
        if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
            PyErr_Clear();
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#endif
}
#if CYTHON_COMPILING_IN_CPYTHON
/* Call a METH_O (or METH_NOARGS with arg==NULL) C function object
 * directly, bypassing argument tuple creation. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc = PyCFunction_GET_FUNCTION(func);
    self = PyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    /* Guard against a C function returning NULL without raising. */
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
/* Call `func` with no arguments, using the fast METH_NOARGS path for
 * builtin/cyfunction objects and falling back to a generic call with
 * the shared empty tuple otherwise. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#ifdef __Pyx_CyFunction_USED
    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
    if (likely(PyCFunction_Check(func))) {
#endif
        if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
            return __Pyx_PyObject_CallMethO(func, NULL);
        }
    }
    return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
/* Slow helper: pack `arg` into a fresh 1-tuple and do a generic call. */
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    Py_INCREF(arg);
    PyTuple_SET_ITEM(args, 0, arg);
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
/* Call `func(arg)`, using the fast METH_O path for builtin/cyfunction
 * objects and the tuple-packing helper otherwise. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#ifdef __Pyx_CyFunction_USED
    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
    if (likely(PyCFunction_Check(func))) {
#endif
        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
            return __Pyx_PyObject_CallMethO(func, arg);
        }
    }
    return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
/* Non-CPython fallback: always pack and call generically. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject* args = PyTuple_Pack(1, arg);
    return (likely(args)) ? __Pyx_PyObject_Call(func, args, NULL) : NULL;
}
#endif
/* Call obj.method_name() with no arguments.  On CPython, unpack bound
 * methods so the underlying function is called with `self` directly,
 * avoiding a temporary argument tuple.  Returns NULL on error. */
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) {
    PyObject *method, *result = NULL;
    method = __Pyx_PyObject_GetAttrStr(obj, method_name);
    if (unlikely(!method)) goto bad;
#if CYTHON_COMPILING_IN_CPYTHON
    if (likely(PyMethod_Check(method))) {
        PyObject *self = PyMethod_GET_SELF(method);
        if (likely(self)) {
            PyObject *function = PyMethod_GET_FUNCTION(method);
            result = __Pyx_PyObject_CallOneArg(function, self);
            Py_DECREF(method);
            return result;
        }
    }
#endif
    result = __Pyx_PyObject_CallNoArg(method);
    Py_DECREF(method);
bad:
    return result;
}
/* Raise ValueError: tuple unpacking found only `index` values. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* Raise ValueError: tuple unpacking got more values than `expected`. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* After unpacking `expected` items, verify the iterator is exhausted.
 * `retval` is one extra tp_iternext result: non-NULL means too many
 * values; NULL means either clean StopIteration (ok) or a real error,
 * which __Pyx_IterFinish distinguishes.  Returns 0/-1. */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
    if (unlikely(retval)) {
        Py_DECREF(retval);
        __Pyx_RaiseTooManyValuesError(expected);
        return -1;
    } else {
        return __Pyx_IterFinish();
    }
    return 0;
}
/* Raise the standard TypeError for iterating/unpacking None. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* Raise the appropriate unpacking error for tuple `t` expected to have
 * `index` items: None is not iterable, too few, or too many values. */
static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
    if (t == Py_None) {
        __Pyx_RaiseNoneNotIterableError();
    } else if (PyTuple_GET_SIZE(t) < index) {
        __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t));
    } else {
        __Pyx_RaiseTooManyValuesError(index);
    }
}
/* Unpack exactly two values from `tuple` into *pvalue1/*pvalue2 (new
 * references).  `is_tuple` asserts the input is a tuple; otherwise a
 * general iterator is used.  `has_known_size` skips the length check;
 * `decref_tuple` makes this consume the caller's reference to `tuple`.
 * Returns 0 on success, -1 with an exception set on error. */
static CYTHON_INLINE int __Pyx_unpack_tuple2(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2,
                                             int is_tuple, int has_known_size, int decref_tuple) {
    Py_ssize_t index;
    PyObject *value1 = NULL, *value2 = NULL, *iter = NULL;
    if (!is_tuple && unlikely(!PyTuple_Check(tuple))) {
        /* Generic path: pull two items from an iterator, then verify
         * exhaustion unless the size is already known. */
        iternextfunc iternext;
        iter = PyObject_GetIter(tuple);
        if (unlikely(!iter)) goto bad;
        if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; }
        iternext = Py_TYPE(iter)->tp_iternext;
        value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; }
        value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; }
        if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad;
        Py_DECREF(iter);
    } else {
        if (!has_known_size && unlikely(PyTuple_GET_SIZE(tuple) != 2)) {
            __Pyx_UnpackTupleError(tuple, 2);
            goto bad;
        }
#if CYTHON_COMPILING_IN_PYPY
        value1 = PySequence_ITEM(tuple, 0);
        if (unlikely(!value1)) goto bad;
        value2 = PySequence_ITEM(tuple, 1);
        if (unlikely(!value2)) goto bad;
#else
        value1 = PyTuple_GET_ITEM(tuple, 0);
        value2 = PyTuple_GET_ITEM(tuple, 1);
        Py_INCREF(value1);
        Py_INCREF(value2);
#endif
        if (decref_tuple) { Py_DECREF(tuple); }
    }
    *pvalue1 = value1;
    *pvalue2 = value2;
    return 0;
unpacking_failed:
    /* StopIteration means too few values; anything else propagates. */
    if (!has_known_size && __Pyx_IterFinish() == 0)
        __Pyx_RaiseNeedMoreValuesError(index);
bad:
    Py_XDECREF(iter);
    Py_XDECREF(value1);
    Py_XDECREF(value2);
    if (decref_tuple) { Py_XDECREF(tuple); }
    return -1;
}
/* Prepare iteration over a dict (or dict-like object).
 * For exact dicts, returns the dict itself (new ref) and stores its
 * size in *p_orig_length so __Pyx_dict_iter_next can use PyDict_Next
 * and detect size changes.  Otherwise calls `method_name` (e.g.
 * "items") if given, keeping exact tuple/list results as-is, and
 * returns an iterator.  *p_source_is_dict tells the caller which
 * protocol to use.  Returns NULL on error. */
static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name,
                                                   Py_ssize_t* p_orig_length, int* p_source_is_dict) {
    is_dict = is_dict || likely(PyDict_CheckExact(iterable));
    *p_source_is_dict = is_dict;
#if !CYTHON_COMPILING_IN_PYPY
    if (is_dict) {
        *p_orig_length = PyDict_Size(iterable);
        Py_INCREF(iterable);
        return iterable;
    }
#endif
    *p_orig_length = 0;
    if (method_name) {
        PyObject* iter;
        iterable = __Pyx_PyObject_CallMethod0(iterable, method_name);
        if (!iterable)
            return NULL;
#if !CYTHON_COMPILING_IN_PYPY
        if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable))
            return iterable;
#endif
        iter = PyObject_GetIter(iterable);
        Py_DECREF(iterable);
        return iter;
    }
    return PyObject_GetIter(iterable);
}
/* Advance one step of an iteration prepared by __Pyx_dict_iterator.
 * Depending on which output pointers are non-NULL, stores the key,
 * the value, both, or the (key, value) item tuple — all new references.
 * Returns 1 when an item was produced, 0 at the end, -1 on error
 * (including a RuntimeError if the dict changed size mid-iteration). */
static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* iter_obj, Py_ssize_t orig_length, Py_ssize_t* ppos,
                                              PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) {
    PyObject* next_item;
#if !CYTHON_COMPILING_IN_PYPY
    if (source_is_dict) {
        PyObject *key, *value;
        if (unlikely(orig_length != PyDict_Size(iter_obj))) {
            PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration");
            return -1;
        }
        if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) {
            return 0;
        }
        if (pitem) {
            /* Caller wants the (key, value) pair as a fresh 2-tuple. */
            PyObject* tuple = PyTuple_New(2);
            if (unlikely(!tuple)) {
                return -1;
            }
            Py_INCREF(key);
            Py_INCREF(value);
            PyTuple_SET_ITEM(tuple, 0, key);
            PyTuple_SET_ITEM(tuple, 1, value);
            *pitem = tuple;
        } else {
            /* PyDict_Next returns borrowed refs; incref what we hand out. */
            if (pkey) {
                Py_INCREF(key);
                *pkey = key;
            }
            if (pvalue) {
                Py_INCREF(value);
                *pvalue = value;
            }
        }
        return 1;
    } else if (PyTuple_CheckExact(iter_obj)) {
        Py_ssize_t pos = *ppos;
        if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0;
        *ppos = pos + 1;
        next_item = PyTuple_GET_ITEM(iter_obj, pos);
        Py_INCREF(next_item);
    } else if (PyList_CheckExact(iter_obj)) {
        Py_ssize_t pos = *ppos;
        if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0;
        *ppos = pos + 1;
        next_item = PyList_GET_ITEM(iter_obj, pos);
        Py_INCREF(next_item);
    } else
#endif
    {
        next_item = PyIter_Next(iter_obj);
        if (unlikely(!next_item)) {
            return __Pyx_IterFinish();
        }
    }
    if (pitem) {
        *pitem = next_item;
    } else if (pkey && pvalue) {
        /* Item is a (key, value) pair; unpack it, consuming next_item. */
        if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1))
            return -1;
    } else if (pkey) {
        *pkey = next_item;
    } else {
        *pvalue = next_item;
    }
    return 1;
}
/* Raise TypeError naming the argument, the expected type and the type
 * actually received. */
static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
    PyErr_Format(PyExc_TypeError,
         "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
         name, type->tp_name, Py_TYPE(obj)->tp_name);
}
/* Type-check argument `obj` against `type`.
 * `none_allowed` accepts Py_None; `exact` requires the exact type
 * (with a Py2 special case treating str/unicode as basestring),
 * otherwise subclasses pass.  Returns 1 if acceptable, 0 with a
 * TypeError set otherwise. */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
                                           const char *name, int exact)
{
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (none_allowed && obj == Py_None) return 1;
    else if (exact) {
        if (likely(Py_TYPE(obj) == type)) return 1;
#if PY_MAJOR_VERSION == 2
        else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
    }
    else {
        if (likely(PyObject_TypeCheck(obj, type))) return 1;
    }
    __Pyx_RaiseArgumentTypeInvalid(name, obj, type);
    return 0;
}
/* Report the host byte order: 1 on little-endian machines, 0 otherwise.
 * Probes the lowest-addressed byte of an unsigned int holding 1. */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
    union {
        unsigned int word;
        unsigned char bytes[sizeof(unsigned int)];
    } probe;
    probe.word = 1U;
    return probe.bytes[0] != 0;
}
/* Initialize a buffer-format parsing context for dtype `type`.
 * `stack` is caller-provided storage for the struct-nesting stack; the
 * root frame describes the expected dtype.  Trailing loop: a dtype that
 * is a one-field struct wrapper ('S' typegroup) is unwrapped so parsing
 * starts at the innermost field. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
    stack[0].field = &ctx->root;
    stack[0].parent_offset = 0;
    ctx->root.type = type;
    ctx->root.name = "buffer dtype";
    ctx->root.offset = 0;
    ctx->head = stack;
    ctx->head->field = &ctx->root;
    ctx->fmt_offset = 0;
    ctx->head->parent_offset = 0;
    /* '@' = native byte order, size and alignment (struct-module notation). */
    ctx->new_packmode = '@';
    ctx->enc_packmode = '@';
    ctx->new_count = 1;
    ctx->enc_count = 0;
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    ctx->is_valid_array = 0;
    ctx->struct_alignment = 0;
    while (type->typegroup == 'S') {
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = 0;
        type = type->fields->type;
    }
}
/* Parse a non-negative decimal number at *ts, advancing *ts past the
 * digits consumed.  Returns the value, or -1 (with *ts unchanged) if
 * the text does not start with a digit.  No overflow check: format
 * strings carry small counts. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        /* BUG FIX: the loop condition was `*t < '9'`, which treated a
         * '9' in any non-leading position as a terminator (e.g. "19"
         * parsed as 1, leaving *ts on the '9').  Use <= '9'. */
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
/* Like __Pyx_BufFmt_ParseNumber, but sets a ValueError when no number
 * is present.  Returns the parsed value or -1 with an exception set. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,\
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
/* Set a ValueError for an unrecognized format-string character. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
    PyErr_Format(PyExc_ValueError,
                 "Unexpected format string character: '%c'", ch);
}
/* Map a struct-module format character to a human-readable type name
 * for use in buffer dtype-mismatch error messages.  `is_complex` only
 * affects the floating-point characters ('f', 'd', 'g'). */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    /* Floating-point characters get a "complex " prefix when requested. */
    if (ch == 'f')
        return is_complex ? "'complex float'" : "'float'";
    if (ch == 'd')
        return is_complex ? "'complex double'" : "'double'";
    if (ch == 'g')
        return is_complex ? "'complex long double'" : "'long double'";
    switch (ch) {
        case 'c': return "'char'";
        case 'b': return "'signed char'";
        case 'B': return "'unsigned char'";
        case 'h': return "'short'";
        case 'H': return "'unsigned short'";
        case 'i': return "'int'";
        case 'I': return "'unsigned int'";
        case 'l': return "'long'";
        case 'L': return "'unsigned long'";
        case 'q': return "'long long'";
        case 'Q': return "'unsigned long long'";
        case 'T': return "a struct";
        case 'O': return "Python object";
        case 'P': return "a pointer";
        case 's':
        case 'p': return "a string";
        case 0:   return "end";
        default:  return "unparseable format string";
    }
}
/* Size in bytes of a format character under *standard* sizing (struct
 * module '<', '>', '=', '!' modes).  Complex doubles the size.  Returns
 * 0 with a ValueError set for unsupported/unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return 2;
        case 'i': case 'I': case 'l': case 'L': return 4;
        case 'q': case 'Q': return 8;
        case 'f': return (is_complex ? 8 : 4);
        case 'd': return (is_complex ? 16 : 8);
        case 'g': {
            /* 'g' has no standard size; only valid in native mode. */
            PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
            return 0;
        }
        case 'O': case 'P': return sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* Size in bytes of a format character under *native* sizing ('@'/'^'
 * modes), i.e. the host compiler's sizeof.  Complex doubles the size.
 * Returns 0 with a ValueError set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    switch (ch) {
        case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(short);
        case 'i': case 'I': return sizeof(int);
        case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(float) * (is_complex ? 2 : 1);
        case 'd': return sizeof(double) * (is_complex ? 2 : 1);
        case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
        case 'O': case 'P': return sizeof(void*);
        default: {
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
        }
    }
}
/* Probe structs: sizeof(__Pyx_st_T) - sizeof(T) yields the alignment
 * requirement of T on this compiler (offset of x after a leading char). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment (in bytes) of the type denoted by format char `ch`.
 * Returns 0 with a ValueError set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
        case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
        case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
        case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
        case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
        case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably the same as above,
   but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* Trailing padding (in bytes, plus the probe char) a struct whose first
 * member has format char `ch` needs; see the comment above.  Returns 0
 * with a ValueError set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
        case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
        case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
        case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
        case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
        case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* Classify a format char into Cython's type-group letters:
 * 'H' char, 'I' signed int, 'U' unsigned int, 'R' real float,
 * 'C' complex float, 'O' Python object, 'P' pointer.
 * Returns 0 with a ValueError set for unknown characters. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    switch (ch) {
        case 'c':
            return 'H';
        case 'b': case 'h': case 'i':
        case 'l': case 'q': case 's': case 'p':
            return 'I';
        case 'B': case 'H': case 'I': case 'L': case 'Q':
            return 'U';
        case 'f': case 'd': case 'g':
            return (is_complex ? 'C' : 'R');
        case 'O':
            return 'O';
        case 'P':
            return 'P';
        default: {
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
        }
    }
}
/* Set a ValueError describing a buffer dtype mismatch at the current
 * parse position: either at top level (expected the root type or the
 * end of the format), or inside a struct field, in which case the
 * "parent.field" path is included in the message. */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
    if (ctx->head == NULL || ctx->head->field == &ctx->root) {
        const char* expected;
        const char* quote;
        if (ctx->head == NULL) {
            /* Past the end of the expected dtype: nothing more allowed. */
            expected = "end";
            quote = "";
        } else {
            expected = ctx->head->field->type->name;
            quote = "'";
        }
        PyErr_Format(PyExc_ValueError,
                     "Buffer dtype mismatch, expected %s%s%s but got %s",
                     quote, expected, quote,
                     __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
    } else {
        __Pyx_StructField* field = ctx->head->field;
        __Pyx_StructField* parent = (ctx->head - 1)->field;
        PyErr_Format(PyExc_ValueError,
                     "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
                     field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
                     parent->type->name, field->name);
    }
}
/* Consume the pending "type chunk" accumulated in ctx (enc_type repeated
 * enc_count times under enc_packmode), matching it element-by-element
 * against the expected dtype tree and advancing ctx->head/fmt_offset.
 * Handles native alignment padding, nested struct descent, and fixed
 * array members.  Returns 0 on success, -1 with an exception set on any
 * mismatch.  NOTE(review): intricate state machine — kept byte-identical. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
    char group;
    size_t size, offset, arraysize = 1;
    /* Empty chunk (e.g. leading call): nothing to match. */
    if (ctx->enc_type == 0) return 0;
    if (ctx->head->field->type->arraysize[0]) {
        /* Expected field is a fixed-size array. */
        int i, ndim = 0;
        if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
            /* 'sN'/'pN' counts as a 1-D char array of length N. */
            ctx->is_valid_array = ctx->head->field->type->ndim == 1;
            ndim = 1;
            if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
                PyErr_Format(PyExc_ValueError,
                             "Expected a dimension of size %zu, got %zu",
                             ctx->head->field->type->arraysize[0], ctx->enc_count);
                return -1;
            }
        }
        if (!ctx->is_valid_array) {
            PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
                         ctx->head->field->type->ndim, ndim);
            return -1;
        }
        for (i = 0; i < ctx->head->field->type->ndim; i++) {
            arraysize *= ctx->head->field->type->arraysize[i];
        }
        ctx->is_valid_array = 0;
        ctx->enc_count = 1;
    }
    group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
    do {
        __Pyx_StructField* field = ctx->head->field;
        __Pyx_TypeInfo* type = field->type;
        /* Element size depends on native vs standard packing mode. */
        if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
            size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
        } else {
            size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
        }
        if (ctx->enc_packmode == '@') {
            /* Native mode: advance fmt_offset to the aligned position. */
            size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
            size_t align_mod_offset;
            if (align_at == 0) return -1;
            align_mod_offset = ctx->fmt_offset % align_at;
            if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
            if (ctx->struct_alignment == 0)
                ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
                                                                       ctx->is_complex);
        }
        if (type->size != size || type->typegroup != group) {
            if (type->typegroup == 'C' && type->fields != NULL) {
                /* Expected a complex built from two reals: descend. */
                size_t parent_offset = ctx->head->parent_offset + field->offset;
                ++ctx->head;
                ctx->head->field = type->fields;
                ctx->head->parent_offset = parent_offset;
                continue;
            }
            if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
                /* 'char' matches any same-sized integer and vice versa. */
            } else {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return -1;
            }
        }
        offset = ctx->head->parent_offset + field->offset;
        if (ctx->fmt_offset != offset) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
                         (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
            return -1;
        }
        ctx->fmt_offset += size;
        if (arraysize)
            ctx->fmt_offset += (arraysize - 1) * size;
        --ctx->enc_count;
        /* Advance to the next expected field, popping finished structs. */
        while (1) {
            if (field == &ctx->root) {
                ctx->head = NULL;
                if (ctx->enc_count != 0) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return -1;
                }
                break;
            }
            ctx->head->field = ++field;
            if (field->type == NULL) {
                /* End of this struct's field list: pop a level. */
                --ctx->head;
                field = ctx->head->field;
                continue;
            } else if (field->type->typegroup == 'S') {
                /* Next field is a nested struct: push into it. */
                size_t parent_offset = ctx->head->parent_offset + field->offset;
                if (field->type->fields->type == NULL) continue;
                field = field->type->fields;
                ++ctx->head;
                ctx->head->field = field;
                ctx->head->parent_offset = parent_offset;
                break;
            } else {
                break;
            }
        }
    } while (ctx->enc_count);
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    return 0;
}
/* Parse a "(d1,d2,...)" array-shape element of a PEP-3118 format string,
 * validating each dimension against the arraysize recorded for the current
 * field.  On success advances *tsp past the closing ')' and returns Py_None
 * (borrowed reference); on failure sets a Python exception and returns NULL. */
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    const char *ts = *tsp;
    int i = 0, number;
    int ndim = ctx->head->field->type->ndim;
    ++ts;  /* consume the opening '(' */
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    /* Flush any pending scalar chunk before the array starts. */
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    while (*ts && *ts != ')') {
        switch (*ts) {
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':
                /* BUG FIX: the original 'continue' did not consume the
                 * whitespace character, so any whitespace inside the
                 * dimension list spun this loop forever.  Advance first. */
                ++ts;
                continue;
            default:  break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                        "Expected a dimension of size %zu, got %d",
                        ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;
    return Py_None;
}
/* Walk a PEP-3118 buffer format string, feeding complete "type chunks" to
 * __Pyx_BufFmt_ProcessTypeChunk for validation against the dtype recorded
 * in ctx.  Returns the pointer past the consumed portion (the terminating
 * NUL, or the '}' that closes a substruct) on success; NULL with a Python
 * exception set on any mismatch. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
    int got_Z = 0;  /* a 'Z' (complex) prefix pending for the next type code */
    while (1) {
        switch(*ts) {
        case 0:  /* end of string: flush the pending chunk; all structs must be closed */
            if (ctx->enc_type != 0 && ctx->head == NULL) {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return NULL;
            }
            if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
            if (ctx->head != NULL) {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return NULL;
            }
            return ts;
        case ' ':
        case '\r':
        case '\n':  /* insignificant whitespace */
            ++ts;
            break;
        case '<':  /* little-endian marker: accepted only on little-endian hosts */
            if (!__Pyx_IsLittleEndian()) {
                PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
                return NULL;
            }
            ctx->new_packmode = '=';
            ++ts;
            break;
        case '>':
        case '!':  /* big-endian / network order: accepted only on big-endian hosts */
            if (__Pyx_IsLittleEndian()) {
                PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
                return NULL;
            }
            ctx->new_packmode = '=';
            ++ts;
            break;
        case '=':
        case '@':
        case '^':  /* explicit pack modes are recorded verbatim */
            ctx->new_packmode = *ts++;
            break;
        case 'T':  /* 'T{...}': nested struct, repeated new_count times */
            {
                const char* ts_after_sub;
                size_t i, struct_count = ctx->new_count;
                size_t struct_alignment = ctx->struct_alignment;
                ctx->new_count = 1;
                ++ts;
                if (*ts != '{') {
                    PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
                    return NULL;
                }
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->enc_type = 0;
                ctx->enc_count = 0;
                ctx->struct_alignment = 0;
                ++ts;
                ts_after_sub = ts;
                /* re-parse the same substring once per repetition */
                for (i = 0; i != struct_count; ++i) {
                    ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
                    if (!ts_after_sub) return NULL;
                }
                ts = ts_after_sub;
                if (struct_alignment) ctx->struct_alignment = struct_alignment;
            }
            break;
        case '}':  /* end of substruct: flush, then pad up to the struct alignment */
            {
                size_t alignment = ctx->struct_alignment;
                ++ts;
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->enc_type = 0;
                if (alignment && ctx->fmt_offset % alignment) {
                    ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
                }
            }
            return ts;
        case 'x':  /* pad bytes: advance the offset, nothing to type-match */
            if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
            ctx->fmt_offset += ctx->new_count;
            ctx->new_count = 1;
            ctx->enc_count = 0;
            ctx->enc_type = 0;
            ctx->enc_packmode = ctx->new_packmode;
            ++ts;
            break;
        case 'Z':  /* complex prefix: must be followed by f/d/g */
            got_Z = 1;
            ++ts;
            if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
                __Pyx_BufFmt_RaiseUnexpectedChar('Z');
                return NULL;
            }
            /* intentional fallthrough: process the following float code */
        case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
        case 'l': case 'L': case 'q': case 'Q':
        case 'f': case 'd': case 'g':
        case 'O': case 'p':
            if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
                ctx->enc_packmode == ctx->new_packmode) {
                /* same element type as the pending chunk: just extend its count */
                ctx->enc_count += ctx->new_count;
                ctx->new_count = 1;
                got_Z = 0;
                ++ts;
                break;
            }
            /* intentional fallthrough: start a new chunk (shared with 's') */
        case 's':
            if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
            ctx->enc_count = ctx->new_count;
            ctx->enc_packmode = ctx->new_packmode;
            ctx->enc_type = *ts;
            ctx->is_complex = got_Z;
            ++ts;
            ctx->new_count = 1;
            got_Z = 0;
            break;
        case ':':  /* ':name:' field annotations are skipped entirely */
            ++ts;
            while(*ts != ':') ++ts;
            ++ts;
            break;
        case '(':  /* array shape element */
            if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
            break;
        default:  /* a repeat count applying to the next type code */
            {
                int number = __Pyx_BufFmt_ExpectNumber(&ts);
                if (number == -1) return NULL;
                ctx->new_count = (size_t)number;
            }
        }
    }
}
/* Reset a Py_buffer to a harmless empty state: no data pointer, no owner,
 * and the module's shared all-zero shape/stride arrays with -1 suboffsets,
 * so downstream indexing code sees a consistently "empty" view. */
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
    buf->obj        = NULL;
    buf->buf        = NULL;
    buf->suboffsets = __Pyx_minusones;
    buf->shape      = __Pyx_zeros;
    buf->strides    = __Pyx_zeros;
}
/* Acquire a buffer view from obj and validate it against the expected
 * dtype, dimensionality and item size.  A None/NULL obj yields a zeroed
 * (empty) buffer and success.  Returns 0 on success, -1 with a Python
 * exception set on failure; on failure the buffer is released (if it was
 * acquired) and left in the zeroed state. */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
        Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
        int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
    if (obj == Py_None || obj == NULL) {
        __Pyx_ZeroBuffer(buf);
        return 0;
    }
    buf->buf = NULL;  /* so a failed acquisition is distinguishable below */
    if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
    if (buf->ndim != nd) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer has wrong number of dimensions (expected %d, got %d)",
                     nd, buf->ndim);
        goto fail;
    }
    if (!cast) {
        /* full PEP-3118 format-string check unless the caller asked to cast */
        __Pyx_BufFmt_Context ctx;
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
    }
    if ((unsigned)buf->itemsize != dtype->size) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
                     buf->itemsize, (buf->itemsize > 1) ? "s" : "",
                     dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
        goto fail;
    }
    if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
    return 0;
fail:;
    /* BUG FIX: a view already acquired by __Pyx_GetBuffer was previously
     * zeroed without being released, leaking the exporter's buffer.
     * __Pyx_SafeReleaseBuffer is a no-op when buf->buf is still NULL. */
    __Pyx_SafeReleaseBuffer(buf);
    __Pyx_ZeroBuffer(buf);
    return -1;
}
/* Release a buffer view, tolerating the zeroed "empty" state produced by
 * __Pyx_ZeroBuffer: a NULL data pointer means nothing was acquired, and the
 * shared __Pyx_minusones suboffsets array must not be handed to the real
 * release function. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
    if (info->buf == NULL) return;
    if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
    __Pyx_ReleaseBuffer(info);
}
/* Python-style floor division for long.  C's '/' truncates toward zero,
 * so when the remainder is nonzero and the operands have opposite signs
 * the truncated quotient is one too large; step it back down. */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
    long quot = a / b;
    long rem = a - quot * b;
    if (rem != 0 && ((rem < 0) != (b < 0)))
        quot--;
    return quot;
}
/* Check that obj is an instance of type (including subclasses).  Returns 1
 * on success, 0 with TypeError (or SystemError for a NULL type) set on
 * failure. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(PyObject_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}
/* Equality/inequality comparison specialized for bytes.  'equals' is Py_EQ
 * or Py_NE.  Returns 1/0 for the outcome, or -1 on error (only possible via
 * the generic rich-compare fallback). */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
    if (s1 == s2) {
        return (equals == Py_EQ);  /* identity implies equality */
    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
        const char *ps1, *ps2;
        Py_ssize_t length = PyBytes_GET_SIZE(s1);
        if (length != PyBytes_GET_SIZE(s2))
            return (equals == Py_NE);
        ps1 = PyBytes_AS_STRING(s1);
        ps2 = PyBytes_AS_STRING(s2);
        if (ps1[0] != ps2[0]) {  /* cheap first-byte check before memcmp */
            return (equals == Py_NE);
        } else if (length == 1) {
            return (equals == Py_EQ);
        } else {
            int result = memcmp(ps1, ps2, (size_t)length);
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
        return (equals == Py_NE);  /* None never equals a bytes object */
    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
        return (equals == Py_NE);
    } else {
        /* mixed or non-bytes operands: defer to the full protocol */
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
#endif
}
/* Equality/inequality comparison specialized for unicode strings.  'equals'
 * is Py_EQ or Py_NE.  Returns 1/0 for the outcome, or -1 on error.  On
 * Python 2, a str operand paired with a unicode operand is promoted to
 * unicode first (owned_ref tracks the temporary so each exit path can
 * release it). */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
    PyObject* owned_ref = NULL;
#endif
    int s1_is_unicode, s2_is_unicode;
    if (s1 == s2) {
        goto return_eq;  /* identity implies equality */
    }
    s1_is_unicode = PyUnicode_CheckExact(s1);
    s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
    if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
        owned_ref = PyUnicode_FromObject(s2);
        if (unlikely(!owned_ref))
            return -1;
        s2 = owned_ref;
        s2_is_unicode = 1;
    } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
        owned_ref = PyUnicode_FromObject(s1);
        if (unlikely(!owned_ref))
            return -1;
        s1 = owned_ref;
        s1_is_unicode = 1;
    } else if (((!s2_is_unicode) & (!s1_is_unicode))) {
        /* two byte strings: use the bytes comparison path instead */
        return __Pyx_PyBytes_Equals(s1, s2, equals);
    }
#endif
    if (s1_is_unicode & s2_is_unicode) {
        Py_ssize_t length;
        int kind;
        void *data1, *data2;
        /* ensure canonical (PEP-393 "ready") representations first */
        if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
            return -1;
        length = __Pyx_PyUnicode_GET_LENGTH(s1);
        if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
            goto return_ne;
        }
        kind = __Pyx_PyUnicode_KIND(s1);
        if (kind != __Pyx_PyUnicode_KIND(s2)) {
            /* different storage widths: cannot be byte-wise equal here */
            goto return_ne;
        }
        data1 = __Pyx_PyUnicode_DATA(s1);
        data2 = __Pyx_PyUnicode_DATA(s2);
        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
            goto return_ne;  /* cheap first-character check */
        } else if (length == 1) {
            goto return_eq;
        } else {
            int result = memcmp(data1, data2, (size_t)(length * kind));
            #if PY_MAJOR_VERSION < 3
            Py_XDECREF(owned_ref);
            #endif
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & s2_is_unicode) {
        goto return_ne;  /* None never equals a unicode object */
    } else if ((s2 == Py_None) & s1_is_unicode) {
        goto return_ne;
    } else {
        /* mixed operand types: defer to the full protocol */
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
return_eq:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_EQ);
return_ne:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_NE);
#endif
}
/* Python-style floor division for Py_ssize_t: correct C's truncate-toward-
 * zero quotient downward when the remainder is nonzero and the operand
 * signs differ.  Mirrors __Pyx_div_long. */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
    Py_ssize_t quot = a / b;
    Py_ssize_t rem = a - quot * b;
    if (rem != 0 && ((rem < 0) != (b < 0)))
        quot--;
    return quot;
}
/* getattr(o, n) with a CPython fast path for exact str/unicode attribute
 * names; falls back to the generic PyObject_GetAttr otherwise. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_COMPILING_IN_CPYTHON
#if PY_MAJOR_VERSION >= 3
    if (likely(PyUnicode_Check(n)))
#else
    if (likely(PyString_Check(n)))
#endif
        return __Pyx_PyObject_GetAttrStr(o, n);
#endif
    return PyObject_GetAttr(o, n);
}
/* Decode cstring[start:stop] into a unicode object.  Negative start/stop
 * are interpreted Python-style relative to strlen(cstring).  Uses the
 * supplied decode_func when given (it encapsulates a specific codec),
 * otherwise decodes via PyUnicode_Decode with 'encoding'/'errors'. */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
    Py_ssize_t length;
    if (unlikely((start < 0) | (stop < 0))) {
        /* only pay for strlen when a negative index needs normalizing */
        length = strlen(cstring);
        if (start < 0) {
            start += length;
            if (start < 0)
                start = 0;
        }
        if (stop < 0)
            stop += length;
    }
    length = stop - start;
    if (unlikely(length <= 0))
        /* empty slice -> empty unicode.  NOTE(review): PyUnicode_FromUnicode
         * is deprecated in Python 3.3+; harmless here but worth confirming
         * against the targeted CPython versions. */
        return PyUnicode_FromUnicode(NULL, 0);
    cstring += start;
    if (decode_func) {
        return decode_func(cstring, length, errors);
    } else {
        return PyUnicode_Decode(cstring, length, encoding, errors);
    }
}
/* Swap the thread state's currently-handled exception (sys.exc_info) with
 * the (*type, *value, *tb) triple, exchanging ownership of the references.
 * On CPython this pokes the tstate->exc_* fields directly for speed.
 * NOTE(review): those field names changed in CPython 3.7 — assumes this
 * module targets the older layout; confirm against the supported versions. */
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_COMPILING_IN_CPYTHON
    PyThreadState *tstate = PyThreadState_GET();
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = *type;
    tstate->exc_value = *value;
    tstate->exc_traceback = *tb;
#else
    PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
    PyErr_SetExcInfo(*type, *value, *tb);
#endif
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
/* o[j] via the generic item protocol.  Consumes (steals) the reference to
 * j; a NULL j means index-object creation already failed, so the error is
 * simply propagated. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *result;
    if (j == NULL)
        return NULL;
    result = PyObject_GetItem(o, j);
    Py_DECREF(j);
    return result;
}
/* list[i] fast path: optional Python-style negative-index wraparound and
 * optional bounds check; out-of-bounds falls back to the generic protocol
 * so the usual IndexError is raised. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                              int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
    if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, i);
        Py_INCREF(r);  /* GET_ITEM returns a borrowed reference */
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* tuple[i] fast path: same contract as the list variant above. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                              int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
    if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, i);
        Py_INCREF(r);  /* GET_ITEM returns a borrowed reference */
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* o[i] for an arbitrary object: dispatches to list/tuple fast paths, then
 * the sq_item slot (with wraparound via sq_length when requested), and
 * finally the generic protocol. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
                                                     int is_list, int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* an OverflowError from sq_length means "too big to
                     * wrap": clear it and let sq_item handle the index */
                    if (PyErr_ExceptionMatches(PyExc_OverflowError))
                        PyErr_Clear();
                    else
                        return NULL;
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* Raise UnboundLocalError for a local variable read before assignment. */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* Report the pending exception via sys.unraisablehook-style machinery
 * (PyErr_WriteUnraisable), labelled with 'name' as the context object.
 * When full_traceback is set, the traceback is also printed to stderr
 * first (the extra INCREFs keep the triple alive across PyErr_PrintEx,
 * which consumes it). */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
                                  CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
                                  int full_traceback) {
    PyObject *old_exc, *old_val, *old_tb;
    PyObject *ctx;
    __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
    if (full_traceback) {
        Py_XINCREF(old_exc);
        Py_XINCREF(old_val);
        Py_XINCREF(old_tb);
        __Pyx_ErrRestore(old_exc, old_val, old_tb);
        PyErr_PrintEx(1);
    }
    #if PY_MAJOR_VERSION < 3
    ctx = PyString_FromString(name);
    #else
    ctx = PyUnicode_FromString(name);
    #endif
    __Pyx_ErrRestore(old_exc, old_val, old_tb);
    if (!ctx) {
        /* could not build the context string: report without it */
        PyErr_WriteUnraisable(Py_None);
    } else {
        PyErr_WriteUnraisable(ctx);
        Py_DECREF(ctx);
    }
}
/* Store the C vtable pointer in a type's dict under '__pyx_vtable__',
 * wrapped in a capsule (PyCObject on very old Pythons).  Returns 0 on
 * success, -1 with an exception set on failure. */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
    PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
    if (!ob)
        goto bad;
    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
        goto bad;
    Py_DECREF(ob);
    return 0;
bad:
    Py_XDECREF(ob);
    return -1;
}
/* Fetch-or-register a utility type shared across Cython modules.  Types are
 * cached as attributes of a fake '_cython_<ABI>' module so that all modules
 * built with the same ABI share one type object; a cached entry must be a
 * type of the same basicsize, otherwise it is rejected.  Returns a borrowed-
 * from-cache pointer (effectively owned by the fake module), NULL on error. */
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) {
    PyObject* fake_module;
    PyTypeObject* cached_type = NULL;
    fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
    if (!fake_module) return NULL;
    Py_INCREF(fake_module);  /* AddModule returns a borrowed reference */
    cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name);
    if (cached_type) {
        if (!PyType_Check((PyObject*)cached_type)) {
            PyErr_Format(PyExc_TypeError,
                "Shared Cython type %.200s is not a type object",
                type->tp_name);
            goto bad;
        }
        if (cached_type->tp_basicsize != type->tp_basicsize) {
            PyErr_Format(PyExc_TypeError,
                "Shared Cython type %.200s has the wrong size, try recompiling",
                type->tp_name);
            goto bad;
        }
    } else {
        /* not registered yet: only an AttributeError is expected here */
        if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad;
        PyErr_Clear();
        if (PyType_Ready(type) < 0) goto bad;
        if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0)
            goto bad;
        Py_INCREF(type);
        cached_type = type;
    }
done:
    Py_DECREF(fake_module);
    return cached_type;
bad:
    Py_XDECREF(cached_type);
    cached_type = NULL;
    goto done;
}
/* __doc__ getter: lazily materialize the docstring from the PyMethodDef
 * (caching it on the function object); None when there is no docstring. */
static PyObject *
__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure)
{
    if (unlikely(op->func_doc == NULL)) {
        if (op->func.m_ml->ml_doc) {
#if PY_MAJOR_VERSION >= 3
            op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc);
#else
            op->func_doc = PyString_FromString(op->func.m_ml->ml_doc);
#endif
            if (unlikely(op->func_doc == NULL))
                return NULL;
        } else {
            Py_INCREF(Py_None);
            return Py_None;
        }
    }
    Py_INCREF(op->func_doc);
    return op->func_doc;
}
/* __doc__ setter: deletion stores None; always succeeds. */
static int
__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value)
{
    PyObject *tmp = op->func_doc;
    if (value == NULL) {
        value = Py_None;  /* 'del func.__doc__' resets to None */
    }
    Py_INCREF(value);
    op->func_doc = value;
    Py_XDECREF(tmp);  /* release old value only after the new one is in place */
    return 0;
}
/* __name__ getter: lazily interned from the PyMethodDef name. */
static PyObject *
__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op)
{
    if (unlikely(op->func_name == NULL)) {
#if PY_MAJOR_VERSION >= 3
        op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name);
#else
        op->func_name = PyString_InternFromString(op->func.m_ml->ml_name);
#endif
        if (unlikely(op->func_name == NULL))
            return NULL;
    }
    Py_INCREF(op->func_name);
    return op->func_name;
}
/* __name__ setter: must be a str; TypeError otherwise. */
static int
__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value)
{
    PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
    if (unlikely(value == NULL || !PyUnicode_Check(value))) {
#else
    if (unlikely(value == NULL || !PyString_Check(value))) {
#endif
        PyErr_SetString(PyExc_TypeError,
                        "__name__ must be set to a string object");
        return -1;
    }
    tmp = op->func_name;
    Py_INCREF(value);
    op->func_name = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __qualname__ getter: always set at construction time. */
static PyObject *
__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op)
{
    Py_INCREF(op->func_qualname);
    return op->func_qualname;
}
/* __qualname__ setter: must be a str; TypeError otherwise. */
static int
__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value)
{
    PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
    if (unlikely(value == NULL || !PyUnicode_Check(value))) {
#else
    if (unlikely(value == NULL || !PyString_Check(value))) {
#endif
        PyErr_SetString(PyExc_TypeError,
                        "__qualname__ must be set to a string object");
        return -1;
    }
    tmp = op->func_qualname;
    Py_INCREF(value);
    op->func_qualname = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __self__ getter: exposes the closure object, or None when there is no
 * closure. */
static PyObject *
__Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure)
{
    PyObject *self;
    self = m->func_closure;
    if (self == NULL)
        self = Py_None;
    Py_INCREF(self);
    return self;
}
/* __dict__ getter: lazily creates an empty dict on first access. */
static PyObject *
__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op)
{
    if (unlikely(op->func_dict == NULL)) {
        op->func_dict = PyDict_New();
        if (unlikely(op->func_dict == NULL))
            return NULL;
    }
    Py_INCREF(op->func_dict);
    return op->func_dict;
}
/* __dict__ setter: must be a dict and may not be deleted. */
static int
__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value)
{
    PyObject *tmp;
    if (unlikely(value == NULL)) {
        PyErr_SetString(PyExc_TypeError,
               "function's dictionary may not be deleted");
        return -1;
    }
    if (unlikely(!PyDict_Check(value))) {
        PyErr_SetString(PyExc_TypeError,
               "setting function's dictionary to a non-dict");
        return -1;
    }
    tmp = op->func_dict;
    Py_INCREF(value);
    op->func_dict = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __globals__ getter: set at construction, never NULL. */
static PyObject *
__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op)
{
    Py_INCREF(op->func_globals);
    return op->func_globals;
}
/* __closure__ getter: Cython functions expose no cell tuple, so None. */
static PyObject *
__Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op)
{
    Py_INCREF(Py_None);
    return Py_None;
}
/* __code__ getter: the associated code object, or None when absent. */
static PyObject *
__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op)
{
    PyObject* result = (op->func_code) ? op->func_code : Py_None;
    Py_INCREF(result);
    return result;
}
/* Lazily populate defaults_tuple/defaults_kwdict by calling the generated
 * defaults_getter, which returns a (tuple, dict) pair.  Returns 0/-1. */
static int
__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) {
    PyObject *res = op->defaults_getter((PyObject *) op);
    if (unlikely(!res))
        return -1;
    op->defaults_tuple = PyTuple_GET_ITEM(res, 0);  /* borrowed from res... */
    Py_INCREF(op->defaults_tuple);                  /* ...so take our own ref */
    op->defaults_kwdict = PyTuple_GET_ITEM(res, 1);
    Py_INCREF(op->defaults_kwdict);
    Py_DECREF(res);
    return 0;
}
/* __defaults__ setter: None or a tuple; deletion stores None. */
static int
__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value) {
    PyObject* tmp;
    if (!value) {
        value = Py_None;
    } else if (value != Py_None && !PyTuple_Check(value)) {
        PyErr_SetString(PyExc_TypeError,
                        "__defaults__ must be set to a tuple object");
        return -1;
    }
    Py_INCREF(value);
    tmp = op->defaults_tuple;
    op->defaults_tuple = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __defaults__ getter: triggers lazy init via defaults_getter if needed. */
static PyObject *
__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op) {
    PyObject* result = op->defaults_tuple;
    if (unlikely(!result)) {
        if (op->defaults_getter) {
            if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
            result = op->defaults_tuple;
        } else {
            result = Py_None;
        }
    }
    Py_INCREF(result);
    return result;
}
/* __kwdefaults__ setter: None or a dict; deletion stores None. */
static int
__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value) {
    PyObject* tmp;
    if (!value) {
        value = Py_None;
    } else if (value != Py_None && !PyDict_Check(value)) {
        PyErr_SetString(PyExc_TypeError,
                        "__kwdefaults__ must be set to a dict object");
        return -1;
    }
    Py_INCREF(value);
    tmp = op->defaults_kwdict;
    op->defaults_kwdict = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __kwdefaults__ getter: triggers lazy init via defaults_getter if needed. */
static PyObject *
__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op) {
    PyObject* result = op->defaults_kwdict;
    if (unlikely(!result)) {
        if (op->defaults_getter) {
            if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
            result = op->defaults_kwdict;
        } else {
            result = Py_None;
        }
    }
    Py_INCREF(result);
    return result;
}
/* __annotations__ setter: dict or None; None/deletion clears the slot. */
static int
__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value) {
    PyObject* tmp;
    if (!value || value == Py_None) {
        value = NULL;  /* stored as NULL; getter re-creates an empty dict */
    } else if (!PyDict_Check(value)) {
        PyErr_SetString(PyExc_TypeError,
                        "__annotations__ must be set to a dict object");
        return -1;
    }
    Py_XINCREF(value);
    tmp = op->func_annotations;
    op->func_annotations = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __annotations__ getter: lazily creates and caches an empty dict. */
static PyObject *
__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op) {
    PyObject* result = op->func_annotations;
    if (unlikely(!result)) {
        result = PyDict_New();
        if (unlikely(!result)) return NULL;
        op->func_annotations = result;
    }
    Py_INCREF(result);
    return result;
}
/* Property table for CyFunction: Py2-style func_* aliases are kept next to
 * the dunder names so both spellings work. */
static PyGetSetDef __pyx_CyFunction_getsets[] = {
    {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
    {(char *) "__doc__",  (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
    {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
    {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
    {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0},
    {(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0},
    {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
    {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
    {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
    {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
    {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
    {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
    {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
    {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
    {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
    {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
    {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0},
    {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0},
    {0, 0, 0, 0, 0}
};
/* __module__ is stored directly on the embedded PyCFunctionObject. */
static PyMemberDef __pyx_CyFunction_members[] = {
    {(char *) "__module__", T_OBJECT, offsetof(__pyx_CyFunctionObject, func.m_module), PY_WRITE_RESTRICTED, 0},
    {0, 0, 0, 0, 0}
};
/* __reduce__: pickle by function name only (module-level lookup on load). */
static PyObject *
__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args)
{
#if PY_MAJOR_VERSION >= 3
    return PyUnicode_FromString(m->func.m_ml->ml_name);
#else
    return PyString_FromString(m->func.m_ml->ml_name);
#endif
}
static PyMethodDef __pyx_CyFunction_methods[] = {
    {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0},
    {0, 0, 0, 0}
};
/* Where the weakref list lives: before CPython 3.5 PyCFunctionObject had no
 * m_weakreflist field, so the CyFunction struct carries its own. */
#if PY_VERSION_HEX < 0x030500A0
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist)
#else
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist)
#endif
/* Allocate and initialize a CyFunction object.  Takes new references to
 * closure/module/globals/code (qualname is assumed non-NULL); lazily-filled
 * slots (dict, name, doc, defaults, annotations) start as NULL.  Returns a
 * new reference, or NULL on allocation failure. */
static PyObject *__Pyx_CyFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags, PyObject* qualname,
                                      PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
    __pyx_CyFunctionObject *op = PyObject_GC_New(__pyx_CyFunctionObject, type);
    if (op == NULL)
        return NULL;
    op->flags = flags;
    __Pyx_CyFunction_weakreflist(op) = NULL;
    op->func.m_ml = ml;
    op->func.m_self = (PyObject *) op;  /* the C function's 'self' is the object itself */
    Py_XINCREF(closure);
    op->func_closure = closure;
    Py_XINCREF(module);
    op->func.m_module = module;
    op->func_dict = NULL;
    op->func_name = NULL;
    Py_INCREF(qualname);
    op->func_qualname = qualname;
    op->func_doc = NULL;
    op->func_classobj = NULL;
    op->func_globals = globals;
    Py_INCREF(op->func_globals);
    Py_XINCREF(code);
    op->func_code = code;
    /* defaults storage is allocated later via __Pyx_CyFunction_InitDefaults */
    op->defaults_pyobjects = 0;
    op->defaults = NULL;
    op->defaults_tuple = NULL;
    op->defaults_kwdict = NULL;
    op->defaults_getter = NULL;
    op->func_annotations = NULL;
    PyObject_GC_Track(op);
    return (PyObject *) op;
}
/* GC tp_clear: drop all owned references, including any PyObject defaults
 * stored in the raw 'defaults' buffer, then free that buffer. */
static int
__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m)
{
    Py_CLEAR(m->func_closure);
    Py_CLEAR(m->func.m_module);
    Py_CLEAR(m->func_dict);
    Py_CLEAR(m->func_name);
    Py_CLEAR(m->func_qualname);
    Py_CLEAR(m->func_doc);
    Py_CLEAR(m->func_globals);
    Py_CLEAR(m->func_code);
    Py_CLEAR(m->func_classobj);
    Py_CLEAR(m->defaults_tuple);
    Py_CLEAR(m->defaults_kwdict);
    Py_CLEAR(m->func_annotations);
    if (m->defaults) {
        PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
        int i;
        for (i = 0; i < m->defaults_pyobjects; i++)
            Py_XDECREF(pydefaults[i]);
        PyMem_Free(m->defaults);
        m->defaults = NULL;
    }
    return 0;
}
/* tp_dealloc: untrack from GC, clear weakrefs, release references, free. */
static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
    PyObject_GC_UnTrack(m);
    if (__Pyx_CyFunction_weakreflist(m) != NULL)
        PyObject_ClearWeakRefs((PyObject *) m);
    __Pyx_CyFunction_clear(m);
    PyObject_GC_Del(m);
}
/* GC tp_traverse: visit every owned PyObject reference. */
static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg)
{
    Py_VISIT(m->func_closure);
    Py_VISIT(m->func.m_module);
    Py_VISIT(m->func_dict);
    Py_VISIT(m->func_name);
    Py_VISIT(m->func_qualname);
    Py_VISIT(m->func_doc);
    Py_VISIT(m->func_globals);
    Py_VISIT(m->func_code);
    Py_VISIT(m->func_classobj);
    Py_VISIT(m->defaults_tuple);
    Py_VISIT(m->defaults_kwdict);
    if (m->defaults) {
        PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
        int i;
        for (i = 0; i < m->defaults_pyobjects; i++)
            Py_VISIT(pydefaults[i]);
    }
    return 0;
}
/* tp_descr_get: implement method binding.  Staticmethods return the
 * function unchanged, classmethods bind to the class, and plain functions
 * bind to the instance (None meaning "unbound"). */
static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type)
{
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) {
        Py_INCREF(func);
        return func;
    }
    if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) {
        if (type == NULL)
            type = (PyObject *)(Py_TYPE(obj));
        return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type)));
    }
    if (obj == Py_None)
        obj = NULL;  /* accessed on the class: produce an unbound method */
    return __Pyx_PyMethod_New(func, obj, type);
}
/* tp_repr: "<cyfunction qualname at 0x...>". */
static PyObject*
__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op)
{
#if PY_MAJOR_VERSION >= 3
    return PyUnicode_FromFormat("<cyfunction %U at %p>",
                                op->func_qualname, (void *)op);
#else
    return PyString_FromFormat("<cyfunction %s at %p>",
                               PyString_AsString(op->func_qualname), (void *)op);
#endif
}
#if CYTHON_COMPILING_IN_PYPY
/* tp_call for PyPy: re-implements PyCFunction_Call's METH_* dispatch,
 * enforcing argument counts for METH_NOARGS/METH_O and rejecting keyword
 * arguments where they are not accepted. */
static PyObject * __Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyCFunctionObject* f = (PyCFunctionObject*)func;
    PyCFunction meth = PyCFunction_GET_FUNCTION(func);
    PyObject *self = PyCFunction_GET_SELF(func);
    Py_ssize_t size;
    switch (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST)) {
    case METH_VARARGS:
        if (likely(kw == NULL) || PyDict_Size(kw) == 0)
            return (*meth)(self, arg);
        break;  /* keywords given but not accepted: error below */
    case METH_VARARGS | METH_KEYWORDS:
        return (*(PyCFunctionWithKeywords)meth)(self, arg, kw);
    case METH_NOARGS:
        if (likely(kw == NULL) || PyDict_Size(kw) == 0) {
            size = PyTuple_GET_SIZE(arg);
            if (size == 0)
                return (*meth)(self, NULL);
            PyErr_Format(PyExc_TypeError,
                "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)",
                f->m_ml->ml_name, size);
            return NULL;
        }
        break;
    case METH_O:
        if (likely(kw == NULL) || PyDict_Size(kw) == 0) {
            size = PyTuple_GET_SIZE(arg);
            if (size == 1)
                return (*meth)(self, PyTuple_GET_ITEM(arg, 0));
            PyErr_Format(PyExc_TypeError,
                "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)",
                f->m_ml->ml_name, size);
            return NULL;
        }
        break;
    default:
        PyErr_SetString(PyExc_SystemError, "Bad call flags in "
                        "__Pyx_CyFunction_Call. METH_OLDARGS is no "
                        "longer supported!");
        return NULL;
    }
    PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments",
                 f->m_ml->ml_name);
    return NULL;
}
#else
/* tp_call for CPython: the stock PyCFunction dispatcher already does the
 * right thing for the embedded PyCFunctionObject. */
static PyObject * __Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    return PyCFunction_Call(func, arg, kw);
}
#endif
/* Type object for cython_function_or_method.  Slots are positional; each
 * zero entry is labelled with the slot it fills. */
static PyTypeObject __pyx_CyFunctionType_type = {
    PyVarObject_HEAD_INIT(0, 0)
    "cython_function_or_method",            /* tp_name */
    sizeof(__pyx_CyFunctionObject),         /* tp_basicsize */
    0,                                      /* tp_itemsize */
    (destructor) __Pyx_CyFunction_dealloc,  /* tp_dealloc */
    0,                                      /* tp_print */
    0,                                      /* tp_getattr */
    0,                                      /* tp_setattr */
#if PY_MAJOR_VERSION < 3
    0,                                      /* tp_compare */
#else
    0,                                      /* tp_reserved */
#endif
    (reprfunc) __Pyx_CyFunction_repr,       /* tp_repr */
    0,                                      /* tp_as_number */
    0,                                      /* tp_as_sequence */
    0,                                      /* tp_as_mapping */
    0,                                      /* tp_hash */
    __Pyx_CyFunction_Call,                  /* tp_call */
    0,                                      /* tp_str */
    0,                                      /* tp_getattro */
    0,                                      /* tp_setattro */
    0,                                      /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,  /* tp_flags */
    0,                                      /* tp_doc */
    (traverseproc) __Pyx_CyFunction_traverse,  /* tp_traverse */
    (inquiry) __Pyx_CyFunction_clear,       /* tp_clear */
    0,                                      /* tp_richcompare */
#if PY_VERSION_HEX < 0x030500A0
    offsetof(__pyx_CyFunctionObject, func_weakreflist),  /* tp_weaklistoffset */
#else
    offsetof(PyCFunctionObject, m_weakreflist),          /* tp_weaklistoffset */
#endif
    0,                                      /* tp_iter */
    0,                                      /* tp_iternext */
    __pyx_CyFunction_methods,               /* tp_methods */
    __pyx_CyFunction_members,               /* tp_members */
    __pyx_CyFunction_getsets,               /* tp_getset */
    0,                                      /* tp_base */
    0,                                      /* tp_dict */
    __Pyx_CyFunction_descr_get,             /* tp_descr_get */
    0,                                      /* tp_descr_set */
    offsetof(__pyx_CyFunctionObject, func_dict),  /* tp_dictoffset */
    0,                                      /* tp_init */
    0,                                      /* tp_alloc */
    0,                                      /* tp_new */
    0,                                      /* tp_free */
    0,                                      /* tp_is_gc */
    0,                                      /* tp_bases */
    0,                                      /* tp_mro */
    0,                                      /* tp_cache */
    0,                                      /* tp_subclasses */
    0,                                      /* tp_weaklist */
    0,                                      /* tp_del */
    0,                                      /* tp_version_tag */
#if PY_VERSION_HEX >= 0x030400a1
    0,                                      /* tp_finalize */
#endif
};
/* Register the CyFunction type (shared across modules via
 * __Pyx_FetchCommonType).  On CPython the stock PyCFunction_Call is used
 * directly as tp_call.  Returns 0/-1. */
static int __Pyx_CyFunction_init(void) {
#if !CYTHON_COMPILING_IN_PYPY
    __pyx_CyFunctionType_type.tp_call = PyCFunction_Call;
#endif
    __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type);
    if (__pyx_CyFunctionType == NULL) {
        return -1;
    }
    return 0;
}
/* Allocate the raw defaults buffer ('size' bytes, zeroed), of which the
 * first 'pyobjects' slots are PyObject* visited/cleared by the GC hooks.
 * Returns the buffer, or NULL with MemoryError set. */
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->defaults = PyMem_Malloc(size);
    if (!m->defaults)
        return PyErr_NoMemory();
    memset(m->defaults, 0, size);
    m->defaults_pyobjects = pyobjects;
    return m->defaults;
}
/* Install the __defaults__ tuple (takes a new reference). */
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->defaults_tuple = tuple;
    Py_INCREF(tuple);
}
/* Install the __kwdefaults__ dict (takes a new reference). */
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->defaults_kwdict = dict;
    Py_INCREF(dict);
}
/* Install the __annotations__ dict (takes a new reference). */
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) {
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    m->func_annotations = dict;
    Py_INCREF(dict);
}
/* Allocate a fused (multi-signature) function: a CyFunction plus the
 * __signatures__ mapping and the bound self/type recorded at binding time.
 * Returns a new reference or NULL. */
static PyObject *
__pyx_FusedFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags,
                        PyObject *qualname, PyObject *self,
                        PyObject *module, PyObject *globals,
                        PyObject *code)
{
    __pyx_FusedFunctionObject *fusedfunc =
        (__pyx_FusedFunctionObject *) __Pyx_CyFunction_New(type, ml, flags, qualname,
                                                           self, module, globals, code);
    if (!fusedfunc)
        return NULL;
    fusedfunc->__signatures__ = NULL;
    fusedfunc->type = NULL;
    fusedfunc->self = NULL;
    return (PyObject *) fusedfunc;
}
/* tp_dealloc for fused functions: clear (handles the base fields too). */
static void __pyx_FusedFunction_dealloc(__pyx_FusedFunctionObject *self) {
    __pyx_FusedFunction_clear(self);
    __pyx_FusedFunctionType->tp_free((PyObject *) self);
}
/* GC tp_traverse: fused-specific fields, then the CyFunction base. */
static int
__pyx_FusedFunction_traverse(__pyx_FusedFunctionObject *self,
                             visitproc visit,
                             void *arg)
{
    Py_VISIT(self->self);
    Py_VISIT(self->type);
    Py_VISIT(self->__signatures__);
    return __Pyx_CyFunction_traverse((__pyx_CyFunctionObject *) self, visit, arg);
}
/* GC tp_clear: fused-specific fields, then the CyFunction base. */
static int
__pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self)
{
    Py_CLEAR(self->self);
    Py_CLEAR(self->type);
    Py_CLEAR(self->__signatures__);
    return __Pyx_CyFunction_clear((__pyx_CyFunctionObject *) self);
}
/* tp_descr_get for fused functions.  An already-bound function (or a
 * staticmethod) is returned unchanged; otherwise a fresh fused function is
 * created sharing the original's metadata, signatures and defaults, with
 * 'self' (or the class, for classmethods) recorded as the binding target. */
static PyObject *
__pyx_FusedFunction_descr_get(PyObject *self, PyObject *obj, PyObject *type)
{
    __pyx_FusedFunctionObject *func, *meth;
    func = (__pyx_FusedFunctionObject *) self;
    if (func->self || func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD) {
        Py_INCREF(self);
        return self;
    }
    if (obj == Py_None)
        obj = NULL;  /* class access: leave unbound */
    meth = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_NewEx(
                    ((PyCFunctionObject *) func)->m_ml,
                    ((__pyx_CyFunctionObject *) func)->flags,
                    ((__pyx_CyFunctionObject *) func)->func_qualname,
                    ((__pyx_CyFunctionObject *) func)->func_closure,
                    ((PyCFunctionObject *) func)->m_module,
                    ((__pyx_CyFunctionObject *) func)->func_globals,
                    ((__pyx_CyFunctionObject *) func)->func_code);
    if (!meth)
        return NULL;
    /* share class object, signature table, binding type and defaults */
    Py_XINCREF(func->func.func_classobj);
    meth->func.func_classobj = func->func.func_classobj;
    Py_XINCREF(func->__signatures__);
    meth->__signatures__ = func->__signatures__;
    Py_XINCREF(type);
    meth->type = type;
    Py_XINCREF(func->func.defaults_tuple);
    meth->func.defaults_tuple = func->func.defaults_tuple;
    if (func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD)
        obj = type;  /* classmethods bind to the class, not the instance */
    Py_XINCREF(obj);
    meth->self = obj;
    return (PyObject *) meth;
}
static PyObject *
_obj_to_str(PyObject *obj)
{
    /* Stringify an index object for signature lookup: type objects
       contribute their __name__, anything else goes through str(obj).
       Returns a new reference or NULL on error. */
    if (!PyType_Check(obj))
        return PyObject_Str(obj);
    return PyObject_GetAttr(obj, __pyx_n_s_name_2);
}
static PyObject *
__pyx_FusedFunction_getitem(__pyx_FusedFunctionObject *self, PyObject *idx)
{
    /* __getitem__: select a specialization by signature.
       A tuple index such as func[float, int] is converted to the string
       "float|int" (types contribute their __name__ via _obj_to_str); the
       string is then looked up in __signatures__.  If this function object
       carries a binding (self/type), the found specialization is re-bound
       accordingly.  Returns a new reference or NULL with an exception set. */
    PyObject *signature = NULL;
    PyObject *unbound_result_func;
    PyObject *result_func = NULL;
    if (self->__signatures__ == NULL) {
        PyErr_SetString(PyExc_TypeError, "Function is not fused");
        return NULL;
    }
    if (PyTuple_Check(idx)) {
        PyObject *list = PyList_New(0);
        Py_ssize_t n = PyTuple_GET_SIZE(idx);
        PyObject *string = NULL;
        PyObject *sep = NULL;
        int i;
        if (!list)
            return NULL;
        for (i = 0; i < n; i++) {
            PyObject *item = PyTuple_GET_ITEM(idx, i);
            string = _obj_to_str(item);
            if (!string || PyList_Append(list, string) < 0) {
                /* bug fix: release `string` here — the original leaked it when
                   PyList_Append failed (only list/sep were freed at the label) */
                Py_XDECREF(string);
                goto __pyx_err;
            }
            Py_DECREF(string);
        }
        sep = PyUnicode_FromString("|");
        if (sep)
            signature = PyUnicode_Join(sep, list);
__pyx_err:
;
        Py_DECREF(list);
        Py_XDECREF(sep);
    } else {
        signature = _obj_to_str(idx);
    }
    if (!signature)
        return NULL;
    unbound_result_func = PyObject_GetItem(self->__signatures__, signature);
    if (unbound_result_func) {
        if (self->self || self->type) {
            __pyx_FusedFunctionObject *unbound = (__pyx_FusedFunctionObject *) unbound_result_func;
            /* propagate the owning class so the re-bound copy sees it */
            Py_CLEAR(unbound->func.func_classobj);
            Py_XINCREF(self->func.func_classobj);
            unbound->func.func_classobj = self->func.func_classobj;
            result_func = __pyx_FusedFunction_descr_get(unbound_result_func,
                                                        self->self, self->type);
        } else {
            result_func = unbound_result_func;
            Py_INCREF(result_func);
        }
    }
    Py_DECREF(signature);
    Py_XDECREF(unbound_result_func);
    return result_func;
}
static PyObject *
__pyx_FusedFunction_callfunction(PyObject *func, PyObject *args, PyObject *kw)
{
    /* Invoke the underlying CyFunction.  For cclass methods (except a
       statically-specialized staticmethod), the first positional argument is
       peeled off and temporarily installed as the PyCFunction m_self so the
       C implementation sees it as the bound instance. */
    __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
    PyObject *result;
    int static_specialized = (cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD &&
                              !((__pyx_FusedFunctionObject *) func)->__signatures__);
    if (cyfunc->flags & __Pyx_CYFUNCTION_CCLASS && !static_specialized) {
        Py_ssize_t argc;
        PyObject *new_args;
        PyObject *self;
        PyObject *m_self;
        argc = PyTuple_GET_SIZE(args);
        new_args = PyTuple_GetSlice(args, 1, argc);
        if (!new_args)
            return NULL;
        self = PyTuple_GetItem(args, 0);   /* borrowed reference */
        if (!self) {
            /* bug fix: the original returned here without releasing the
               slice, leaking new_args on this error path */
            Py_DECREF(new_args);
            return NULL;
        }
        /* temporarily swap in `self` (borrowed — no refcount changes),
           restored right after the call */
        m_self = cyfunc->func.m_self;
        cyfunc->func.m_self = self;
        result = __Pyx_CyFunction_Call(func, new_args, kw);
        cyfunc->func.m_self = m_self;
        Py_DECREF(new_args);
    } else {
        result = __Pyx_CyFunction_Call(func, args, kw);
    }
    return result;
}
static PyObject *
__pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw)
{
    /* tp_call for fused functions.  If bound (self set), prepend self to the
       argument tuple.  If still fused (__signatures__ set), first dispatch on
       the argument types to obtain the specialized function, then call it
       with the (possibly extended) arguments.  Returns a new reference or
       NULL with an exception set. */
    __pyx_FusedFunctionObject *binding_func = (__pyx_FusedFunctionObject *) func;
    Py_ssize_t argc = PyTuple_GET_SIZE(args);
    PyObject *new_args = NULL;
    __pyx_FusedFunctionObject *new_func = NULL;
    PyObject *result = NULL;
    PyObject *self = NULL;
    int is_staticmethod = binding_func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD;
    int is_classmethod = binding_func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD;
    if (binding_func->self) {
        /* bound method: build the tuple (self, *args) */
        Py_ssize_t i;
        new_args = PyTuple_New(argc + 1);
        if (!new_args)
            return NULL;
        self = binding_func->self;
        Py_INCREF(self);
        PyTuple_SET_ITEM(new_args, 0, self);
        for (i = 0; i < argc; i++) {
            PyObject *item = PyTuple_GET_ITEM(args, i);
            Py_INCREF(item);
            PyTuple_SET_ITEM(new_args, i + 1, item);
        }
        args = new_args;
    } else if (binding_func->type) {
        /* unbound method accessed through the class: self must be args[0] */
        if (argc < 1) {
            PyErr_SetString(PyExc_TypeError, "Need at least one argument, 0 given.");
            return NULL;
        }
        self = PyTuple_GET_ITEM(args, 0);
    }
    /* NOTE(review): PyObject_IsInstance can return -1 on error; that value is
       truthy here, so an internal error is indistinguishable from "is an
       instance" — confirm against upstream Cython behavior. */
    if (self && !is_classmethod && !is_staticmethod &&
        !PyObject_IsInstance(self, binding_func->type)) {
        PyErr_Format(PyExc_TypeError,
                     "First argument should be of type %.200s, got %.200s.",
                     ((PyTypeObject *) binding_func->type)->tp_name,
                     self->ob_type->tp_name);
        goto __pyx_err;
    }
    if (binding_func->__signatures__) {
        /* dispatch step: the fused entry point is called with
           (signatures, args, kw-or-None, defaults) and returns the matching
           specialization */
        PyObject *tup = PyTuple_Pack(4, binding_func->__signatures__, args,
                                     kw == NULL ? Py_None : kw,
                                     binding_func->func.defaults_tuple);
        if (!tup)
            goto __pyx_err;
        new_func = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_callfunction(func, tup, NULL);
        Py_DECREF(tup);
        if (!new_func)
            goto __pyx_err;
        /* propagate the owning class onto the specialization */
        Py_XINCREF(binding_func->func.func_classobj);
        Py_CLEAR(new_func->func.func_classobj);
        new_func->func.func_classobj = binding_func->func.func_classobj;
        func = (PyObject *) new_func;
    }
    result = __pyx_FusedFunction_callfunction(func, args, kw);
__pyx_err:
    Py_XDECREF(new_args);
    Py_XDECREF((PyObject *) new_func);
    return result;
}
/* Expose __signatures__ (mapping of signature string -> specialization)
   as a read-only attribute on fused-function objects. */
static PyMemberDef __pyx_FusedFunction_members[] = {
    {(char *) "__signatures__",
     T_OBJECT,
     offsetof(__pyx_FusedFunctionObject, __signatures__),
     READONLY,
     0},
    {0, 0, 0, 0, 0},   /* sentinel */
};
/* Mapping protocol: only subscription is supported (func[signature]). */
static PyMappingMethods __pyx_FusedFunction_mapping_methods = {
    0,                                         /* mp_length */
    (binaryfunc) __pyx_FusedFunction_getitem,  /* mp_subscript */
    0,                                         /* mp_ass_subscript */
};
/* Type object for fused cython functions; inherits from the CyFunction
   type (tp_base) and adds signature subscription, binding and the
   __signatures__ member. */
static PyTypeObject __pyx_FusedFunctionType_type = {
    PyVarObject_HEAD_INIT(0, 0)
    "fused_cython_function",                   /* tp_name */
    sizeof(__pyx_FusedFunctionObject),         /* tp_basicsize */
    0,                                         /* tp_itemsize */
    (destructor) __pyx_FusedFunction_dealloc,  /* tp_dealloc */
    0,                                         /* tp_print */
    0,                                         /* tp_getattr */
    0,                                         /* tp_setattr */
#if PY_MAJOR_VERSION < 3
    0,                                         /* tp_compare */
#else
    0,                                         /* reserved */
#endif
    0,                                         /* tp_repr */
    0,                                         /* tp_as_number */
    0,                                         /* tp_as_sequence */
    &__pyx_FusedFunction_mapping_methods,      /* tp_as_mapping */
    0,                                         /* tp_hash */
    (ternaryfunc) __pyx_FusedFunction_call,    /* tp_call */
    0,                                         /* tp_str */
    0,                                         /* tp_getattro */
    0,                                         /* tp_setattro */
    0,                                         /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE,  /* tp_flags */
    0,                                         /* tp_doc */
    (traverseproc) __pyx_FusedFunction_traverse,  /* tp_traverse */
    (inquiry) __pyx_FusedFunction_clear,       /* tp_clear */
    0,                                         /* tp_richcompare */
    0,                                         /* tp_weaklistoffset */
    0,                                         /* tp_iter */
    0,                                         /* tp_iternext */
    0,                                         /* tp_methods */
    __pyx_FusedFunction_members,               /* tp_members */
    __pyx_CyFunction_getsets,                  /* tp_getset */
    &__pyx_CyFunctionType_type,                /* tp_base */
    0,                                         /* tp_dict */
    __pyx_FusedFunction_descr_get,             /* tp_descr_get */
    0,                                         /* tp_descr_set */
    0,                                         /* tp_dictoffset */
    0,                                         /* tp_init */
    0,                                         /* tp_alloc */
    0,                                         /* tp_new */
    0,                                         /* tp_free */
    0,                                         /* tp_is_gc */
    0,                                         /* tp_bases */
    0,                                         /* tp_mro */
    0,                                         /* tp_cache */
    0,                                         /* tp_subclasses */
    0,                                         /* tp_weaklist */
    0,                                         /* tp_del */
    0,                                         /* tp_version_tag */
#if PY_VERSION_HEX >= 0x030400a1
    0,                                         /* tp_finalize */
#endif
};
static int __pyx_FusedFunction_init(void) {
    /* Register the fused-function type via Cython's shared-type cache.
       Returns 0 on success, -1 on failure (exception set by the fetcher). */
    __pyx_FusedFunctionType = __Pyx_FetchCommonType(&__pyx_FusedFunctionType_type);
    return (__pyx_FusedFunctionType == NULL) ? -1 : 0;
}
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    /* Binary search over entries sorted by code_line.  Returns the index of
       the matching entry, or the insertion point that keeps the array
       sorted (== count when code_line is past the last entry).
       NOTE(review): with count == 0 the final entries[mid] read would be
       out of bounds; callers in this file only pass a non-empty cache. */
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = (start + end) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
    /* Look up a cached traceback code object by line.  Returns a NEW
       reference, or NULL when the cache is empty / line not cached
       (code_line 0 is a "no line" sentinel and never cached). */
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    /* Insert (or replace) a code object in the sorted traceback cache,
       keeping its own reference.  Failures (allocation) are silent: the
       cache is an optimization only. */
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;   /* 0 is the "no line" sentinel — never cache it */
    }
    if (unlikely(!entries)) {
        /* first insertion: allocate the initial 64-slot array */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        /* same line already cached: swap in the new object */
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        /* grow linearly by 64 slots */
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* shift the tail up one slot and insert at the bisection point */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    /* Build a minimal, empty code object whose name/filename/line make
       Python tracebacks point at the Cython source.  When a C line is
       known it is appended to the displayed function name.
       Returns a new reference or NULL on failure. */
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
#else
    py_srcfile = PyUnicode_FromString(filename);
#endif
    if (!py_srcfile) goto bad;
    if (c_line) {
        /* include "(file.c:123)" so the C location is visible too */
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
    }
    else {
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
#else
        py_funcname = PyUnicode_FromString(funcname);
#endif
    }
    if (!py_funcname) goto bad;
    py_code = __Pyx_PyCode_New(
        0,
        0,
        0,
        0,
        0,
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,    /*PyObject *filename,*/
        py_funcname,   /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    /* Append a synthetic frame to the current exception's traceback so the
       error appears to originate from the Cython source line.  Code objects
       are cached per line; failures are swallowed (best effort). */
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    py_code = __pyx_find_code_object(c_line ? c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        PyThreadState_GET(), /*PyThreadState *tstate,*/
        py_code,             /*PyCodeObject *code,*/
        __pyx_d,             /*PyObject *globals,*/
        0                    /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    py_frame->f_lineno = py_line;
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
                        int ndim,
                        __Pyx_memviewslice *memviewslice,
                        int memview_is_new_reference)
{
    /* Populate a memviewslice from memview->view: copy shape, strides
       (synthesizing C-contiguous strides when the buffer has none) and
       suboffsets (-1 means "no indirection").  Bumps the slice acquisition
       count; takes an extra reference to memview unless it was just created.
       Returns 0 on success, -1 on error with an exception set. */
    __Pyx_RefNannyDeclarations
    int i, retval=-1;
    Py_buffer *buf = &memview->view;
    __Pyx_RefNannySetupContext("init_memviewslice", 0);
    if (!buf) {
        PyErr_SetString(PyExc_ValueError,
                        "buf is NULL.");
        goto fail;
    } else if (memviewslice->memview || memviewslice->data) {
        PyErr_SetString(PyExc_ValueError,
                        "memviewslice is already initialized!");
        goto fail;
    }
    if (buf->strides) {
        for (i = 0; i < ndim; i++) {
            memviewslice->strides[i] = buf->strides[i];
        }
    } else {
        /* no strides: assume C-contiguity and compute them from shape */
        Py_ssize_t stride = buf->itemsize;
        for (i = ndim - 1; i >= 0; i--) {
            memviewslice->strides[i] = stride;
            stride *= buf->shape[i];
        }
    }
    for (i = 0; i < ndim; i++) {
        memviewslice->shape[i]   = buf->shape[i];
        if (buf->suboffsets) {
            memviewslice->suboffsets[i] = buf->suboffsets[i];
        } else {
            memviewslice->suboffsets[i] = -1;
        }
    }
    memviewslice->memview = memview;
    memviewslice->data = (char *)buf->buf;
    if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
        Py_INCREF(memview);
    }
    retval = 0;
    goto no_fail;
fail:
    memviewslice->memview = 0;
    memviewslice->data = 0;
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) {
    /* Format a message and abort the interpreter via Py_FatalError.
       Bug fix: the original called va_start twice (once unconditionally and
       once inside the #ifdef) without an intervening va_end — undefined
       behavior per C11 7.16.1.4.  Only the #ifdef-selected va_start remains,
       and va_end now runs before the (non-returning) Py_FatalError. */
    va_list vargs;
    char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
    va_start(vargs, fmt);
#else
    va_start(vargs);
#endif
    vsnprintf(msg, sizeof msg, fmt, vargs);
    va_end(vargs);
    Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    /* Post-increment the slice acquisition count under `lock`;
       returns the value before the increment. */
    int old_value;
    PyThread_acquire_lock(lock, 1);
    old_value = (*acquisition_count)++;
    PyThread_release_lock(lock);
    return old_value;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    /* Post-decrement the slice acquisition count under `lock`;
       returns the value before the decrement. */
    int old_value;
    PyThread_acquire_lock(lock, 1);
    old_value = (*acquisition_count)--;
    PyThread_release_lock(lock);
    return old_value;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
    /* Acquire a reference to the slice's owning memoryview.  The first
       acquisition takes a real Py_INCREF (grabbing the GIL if the caller
       does not hold it); later ones only bump the acquisition counter.
       No-op for NULL/None memviews. */
    int first_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview || (PyObject *) memview == Py_None)
        return;
    if (__pyx_get_slice_count(memview) < 0)
        /* a negative count means the bookkeeping is already corrupt */
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    first_time = __pyx_add_acquisition_count(memview) == 0;
    if (first_time) {
        if (have_gil) {
            Py_INCREF((PyObject *) memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_INCREF((PyObject *) memview);
            PyGILState_Release(_gilstate);
        }
    }
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
                                             int have_gil, int lineno) {
    /* Release one acquisition of the slice's owning memoryview and clear the
       slice.  The last release drops the real Python reference (grabbing the
       GIL if needed); earlier ones only null out the slice fields.
       Safe on NULL/None memviews. */
    int last_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview ) {
        return;
    } else if ((PyObject *) memview == Py_None) {
        memslice->memview = NULL;
        return;
    }
    if (__pyx_get_slice_count(memview) <= 0)
        /* releasing more times than acquired: bookkeeping is corrupt */
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    last_time = __pyx_sub_acquisition_count(memview) == 1;
    memslice->data = NULL;
    if (last_time) {
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
        memslice->memview = NULL;
    }
}
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
    /* Structural equality of two Cython buffer type descriptions: size,
       typegroup, signedness, ndim, fixed array sizes and (for structs,
       typegroup 'S') flags plus a recursive field-by-field comparison.
       Typegroup 'H' entries compare by size only.  Returns 1 if equal,
       0 otherwise (NULL compares unequal to everything). */
    int i;
    if (!a || !b)
        return 0;
    if (a == b)
        return 1;
    if (a->size != b->size || a->typegroup != b->typegroup ||
            a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
        if (a->typegroup == 'H' || b->typegroup == 'H') {
            return a->size == b->size;
        } else {
            return 0;
        }
    }
    if (a->ndim) {
        for (i = 0; i < a->ndim; i++)
            if (a->arraysize[i] != b->arraysize[i])
                return 0;
    }
    if (a->typegroup == 'S') {
        if (a->flags != b->flags)
            return 0;
        if (a->fields || b->fields) {
            if (!(a->fields && b->fields))
                return 0;
            for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
                __Pyx_StructField *field_a = a->fields + i;
                __Pyx_StructField *field_b = b->fields + i;
                if (field_a->offset != field_b->offset ||
                    !__pyx_typeinfo_cmp(field_a->type, field_b->type))
                    return 0;
            }
            /* equal only if both field lists ended at the same index */
            return !a->fields[i].type && !b->fields[i].type;
        }
    }
    return 1;
}
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
    /* Validate one dimension of `buf` against the access spec of the target
       memoryview (contig / follow / ptr / full flags).  Dimensions of extent
       <= 1 always pass.  Returns 1 on success, 0 with ValueError set on
       mismatch. */
    if (buf->shape[dim] <= 1)
        return 1;
    if (buf->strides) {
        if (spec & __Pyx_MEMVIEW_CONTIG) {
            if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
                /* indirect-contiguous: stride must be exactly one pointer */
                if (buf->strides[dim] != sizeof(void *)) {
                    PyErr_Format(PyExc_ValueError,
                                 "Buffer is not indirectly contiguous "
                                 "in dimension %d.", dim);
                    goto fail;
                }
            } else if (buf->strides[dim] != buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
        if (spec & __Pyx_MEMVIEW_FOLLOW) {
            /* "follow" dims only require |stride| >= itemsize */
            Py_ssize_t stride = buf->strides[dim];
            if (stride < 0)
                stride = -stride;
            if (stride < buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
    } else {
        /* NULL strides: the exporter claims a C-contiguous buffer */
        if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not contiguous in "
                         "dimension %d", dim);
            goto fail;
        } else if (spec & (__Pyx_MEMVIEW_PTR)) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not indirect in "
                         "dimension %d", dim);
            goto fail;
        } else if (buf->suboffsets) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer exposes suboffsets but no strides");
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
    /* Validate indirection for one dimension: DIRECT dims must not carry a
       suboffset (>= 0), PTR dims must.  Returns 1 on success, 0 with
       ValueError set otherwise. */
    if (spec & __Pyx_MEMVIEW_DIRECT) {
        if (buf->suboffsets && buf->suboffsets[dim] >= 0) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer not compatible with direct access "
                         "in dimension %d.", dim);
            goto fail;
        }
    }
    if (spec & __Pyx_MEMVIEW_PTR) {
        if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer is not indirectly accessible "
                         "in dimension %d.", dim);
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
    /* Check that `buf` is Fortran- or C-contiguous, as requested by
       c_or_f_flag.  Dimensions of extent <= 1 are skipped (their stride is
       irrelevant).  Returns 1 on success, 0 with ValueError set on failure. */
    int dim;
    Py_ssize_t expected = 1;
    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
        /* Fortran order: strides grow from the first dimension outward */
        for (dim = 0; dim < ndim; dim++) {
            if (buf->shape[dim] > 1 &&
                buf->strides[dim] != expected * buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not fortran contiguous.");
                return 0;
            }
            expected *= buf->shape[dim];
        }
    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
        /* C order: strides grow from the last dimension backward */
        for (dim = ndim - 1; dim >= 0; dim--) {
            if (buf->shape[dim] > 1 &&
                buf->strides[dim] != expected * buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not C contiguous.");
                return 0;
            }
            expected *= buf->shape[dim];
        }
    }
    return 1;
}
static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj)
{
    /* Coerce `original_obj` into a validated memviewslice: reuse it directly
       if it is already a memoryview of a matching dtype, otherwise wrap it
       in a new memoryview; then verify ndim, buffer format string, itemsize,
       per-dimension stride/suboffset specs and overall contiguity before
       initializing `memviewslice`.  Returns 0 on success, -1 on error with
       an exception set (and the temporary memoryview released). */
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int from_memoryview = __pyx_memoryview_check(original_obj);
    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
                                                    original_obj)->typeinfo)) {
        /* already a compatible memoryview: use it as-is (borrowed) */
        memview = (struct __pyx_memoryview_obj *) original_obj;
        new_memview = NULL;
    } else {
        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                            original_obj, buf_flags, 0, dtype);
        new_memview = memview;
        if (unlikely(!memview))
            goto fail;
    }
    buf = &memview->view;
    if (buf->ndim != ndim) {
        PyErr_Format(PyExc_ValueError,
                "Buffer has wrong number of dimensions (expected %d, got %d)",
                ndim, buf->ndim);
        goto fail;
    }
    if (new_memview) {
        /* only freshly wrapped buffers still need their format string checked */
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
    }
    if ((unsigned) buf->itemsize != dtype->size) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
                     buf->itemsize,
                     (buf->itemsize > 1) ? "s" : "",
                     dtype->name,
                     dtype->size,
                     (dtype->size > 1) ? "s" : "");
        goto fail;
    }
    for (i = 0; i < ndim; i++) {
        spec = axes_specs[i];
        if (!__pyx_check_strides(buf, i, ndim, spec))
            goto fail;
        if (!__pyx_check_suboffsets(buf, i, ndim, spec))
            goto fail;
    }
    if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
                                         new_memview != NULL) == -1)) {
        goto fail;
    }
    retval = 0;
    goto no_fail;
fail:
    Py_XDECREF(new_memview);
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(PyObject *obj) {
    /* Coerce `obj` into a 1-D direct/strided float memoryview slice.
       Py_None maps to a "None slice"; on failure the returned slice has
       NULL memview/data and a Python exception is set. */
    __Pyx_memviewslice slice = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem fmt_stack[1];
    int specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
    if (obj == Py_None) {
        slice.memview = (struct __pyx_memoryview_obj *) Py_None;
    } else if (unlikely(__Pyx_ValidateAndInit_memviewslice(specs, 0,
                                                           PyBUF_RECORDS, 1,
                                                           &__Pyx_TypeInfo_float, fmt_stack,
                                                           &slice, obj) == -1)) {
        slice.memview = NULL;
        slice.data = NULL;
    }
    return slice;
}
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj) {
    /* Coerce `obj` into a 1-D direct/strided double memoryview slice.
       Py_None maps to a "None slice"; on failure the returned slice has
       NULL memview/data and a Python exception is set. */
    __Pyx_memviewslice slice = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem fmt_stack[1];
    int specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
    if (obj == Py_None) {
        slice.memview = (struct __pyx_memoryview_obj *) Py_None;
    } else if (unlikely(__Pyx_ValidateAndInit_memviewslice(specs, 0,
                                                           PyBUF_RECORDS, 1,
                                                           &__Pyx_TypeInfo_double, fmt_stack,
                                                           &slice, obj) == -1)) {
        slice.memview = NULL;
        slice.data = NULL;
    }
    return slice;
}
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
    /* Import `name` on behalf of this module, mirroring __import__ semantics.
       `level` -1 means "try relative first, then absolute" (legacy); on
       Python 3 inside a package this first attempts level 1 and falls back
       to an absolute import if that raises ImportError.  Returns a new
       reference or NULL. */
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
#if PY_VERSION_HEX < 0x03030000
    PyObject *py_import;
    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
    if (!py_import)
        goto bad;
#endif
    if (from_list)
        list = from_list;
    else {
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    global_dict = PyModule_GetDict(__pyx_m);
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    {
#if PY_MAJOR_VERSION >= 3
        if (level == -1) {
            if (strchr(__Pyx_MODULE_NAME, '.')) {
                /* we are inside a package: try a relative import first */
#if PY_VERSION_HEX < 0x03030000
                PyObject *py_level = PyInt_FromLong(1);
                if (!py_level)
                    goto bad;
                module = PyObject_CallFunctionObjArgs(py_import,
                    name, global_dict, empty_dict, list, py_level, NULL);
                Py_DECREF(py_level);
#else
                module = PyImport_ImportModuleLevelObject(
                    name, global_dict, empty_dict, list, 1);
#endif
                if (!module) {
                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
                        goto bad;
                    PyErr_Clear();   /* fall back to absolute import below */
                }
            }
            level = 0;
        }
#endif
        if (!module) {
#if PY_VERSION_HEX < 0x03030000
            PyObject *py_level = PyInt_FromLong(level);
            if (!py_level)
                goto bad;
            module = PyObject_CallFunctionObjArgs(py_import,
                name, global_dict, empty_dict, list, py_level, NULL);
            Py_DECREF(py_level);
#else
            module = PyImport_ImportModuleLevelObject(
                name, global_dict, empty_dict, list, level);
#endif
        }
    }
bad:
#if PY_VERSION_HEX < 0x03030000
    Py_XDECREF(py_import);
#endif
    Py_XDECREF(empty_list);
    Py_XDECREF(empty_dict);
    return module;
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
    /* Python 2 fallback for PyObject_GetBuffer: dispatch to the known
       buffer-capable types generated in this module (numpy ndarray, Cython
       array, Cython memoryview).  Returns 0 on success, -1 with TypeError
       otherwise. */
    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
    if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
    if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
    if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
    return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
    /* Python 2 fallback for PyBuffer_Release, mirroring __Pyx_GetBuffer:
       native buffer objects go through PyBuffer_Release, ndarrays through
       their generated __releasebuffer__; anything else just drops the
       reference held by the view. */
    PyObject *obj = view->obj;
    if (!obj) return;
    if (PyObject_CheckBuffer(obj)) {
        PyBuffer_Release(view);
        return;
    }
    if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; }
    Py_DECREF(obj);
    view->obj = NULL;
}
#endif
/* Return `func_value` narrowed to `target_type`, guarding against loss of
   range when the target type is narrower than the source type.  Relies on
   `is_unsigned` plus the `raise_overflow` / `raise_neg_overflow` labels
   existing in the calling function.  NOTE: expands to a return statement. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value) \
    { \
        func_type value = func_value; \
        if (sizeof(target_type) < sizeof(func_type)) { \
            if (unlikely(value != (func_type) (target_type) value)) { \
                func_type zero = 0; \
                if (is_unsigned && unlikely(value < zero)) \
                    goto raise_neg_overflow; \
                else \
                    goto raise_overflow; \
            } \
        } \
        return (target_type) value; \
    }
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
    /* Convert a Python number to a C int with overflow checking.
       Fast paths: Py2 ints, small PyLongs via digit access (CPython
       internals), then PyLong_AsLong*/PyLong_AsUnsignedLong* depending on
       which C type is wide enough; the last resort copies the value out
       byte-by-byte with _PyLong_AsByteArray.  Non-int objects are first
       coerced via __Pyx_PyNumber_Int.  Returns (int)-1 with an exception
       set on error. */
    const int neg_one = (int) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;   /* compile-time signedness probe */
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            /* single-digit fast path */
            switch (Py_SIZE(x)) {
                case  0: return 0;
                case  1: __PYX_VERIFY_RETURN_INT(int, digit, ((PyLongObject*)x)->ob_digit[0]);
            }
#endif
#endif
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong(x))
            } else if (sizeof(int) <= sizeof(unsigned long long)) {
                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong(x))
            }
        } else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            /* single-digit fast path, signed */
            switch (Py_SIZE(x)) {
                case  0: return 0;
                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +(((PyLongObject*)x)->ob_digit[0]));
                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
            }
#endif
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong(x))
            } else if (sizeof(int) <= sizeof(long long)) {
                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong(x))
            }
        }
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            /* last resort: raw byte copy honoring host endianness */
            int val;
            PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* not an int/long: coerce, then recurse once */
        int val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to int");
    return (int) -1;
}
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
    /* Convert a C long to a Python int, choosing the narrowest CPython
       constructor that can hold it.  This is a Cython template instantiated
       for `long`, hence tautologies like sizeof(long) < sizeof(long): the
       compiler folds the dead branches away.  Returns a new reference. */
    const long neg_one = (long) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;   /* compile-time signedness probe */
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
        } else if (sizeof(long) <= sizeof(unsigned long long)) {
            return PyLong_FromUnsignedLongLong((unsigned long long) value);
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(long long)) {
            return PyLong_FromLongLong((long long) value);
        }
    }
    {
        /* unreachable for `long`, kept by the template: raw byte copy */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
    /* Convert a Python number to a C char with overflow checking; same
       template as __Pyx_PyInt_As_int instantiated for `char` (whose
       signedness is implementation-defined — probed at compile time below).
       Returns (char)-1 with an exception set on error. */
    const char neg_one = (char) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;   /* compile-time signedness probe */
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(char) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (char) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            /* single-digit fast path */
            switch (Py_SIZE(x)) {
                case  0: return 0;
                case  1: __PYX_VERIFY_RETURN_INT(char, digit, ((PyLongObject*)x)->ob_digit[0]);
            }
#endif
#endif
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
            if (sizeof(char) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT(char, unsigned long, PyLong_AsUnsignedLong(x))
            } else if (sizeof(char) <= sizeof(unsigned long long)) {
                __PYX_VERIFY_RETURN_INT(char, unsigned long long, PyLong_AsUnsignedLongLong(x))
            }
        } else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            /* single-digit fast path, signed */
            switch (Py_SIZE(x)) {
                case  0: return 0;
                case  1: __PYX_VERIFY_RETURN_INT(char,  digit, +(((PyLongObject*)x)->ob_digit[0]));
                case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
            }
#endif
#endif
            if (sizeof(char) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT(char, long, PyLong_AsLong(x))
            } else if (sizeof(char) <= sizeof(long long)) {
                __PYX_VERIFY_RETURN_INT(char, long long, PyLong_AsLongLong(x))
            }
        }
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            /* last resort: raw byte copy honoring host endianness */
            char val;
            PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (char) -1;
        }
    } else {
        /* not an int/long: coerce, then recurse once */
        char val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (char) -1;
        val = __Pyx_PyInt_As_char(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to char");
    return (char) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to char");
    return (char) -1;
}
static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character) {
    /* Return 1 if `character` occurs in the bytes object, else 0. */
    const Py_ssize_t n = PyBytes_GET_SIZE(bytes);
    const char* data = PyBytes_AS_STRING(bytes);
    Py_ssize_t i;
    for (i = 0; i < n; i++) {
        if (data[i] == character)
            return 1;
    }
    return 0;
}
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
/* Build a float complex value from (real, imag); C++ std::complex variant. */
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
    return ::std::complex< float >(x, y);
}
#else
/* Build a float complex value from (real, imag); C99 _Complex variant. */
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
    return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
/* Build a float complex value from (real, imag); struct fallback when the
   compiler has no native complex support. */
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
    __pyx_t_float_complex z;
    z.real = x;
    z.imag = y;
    return z;
}
#endif
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    /* Complex equality: both components must compare equal. */
    if (a.real != b.real)
        return 0;
    return a.imag == b.imag;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    /* Complex addition: componentwise sum. */
    __pyx_t_float_complex sum;
    sum.real = a.real + b.real;
    sum.imag = a.imag + b.imag;
    return sum;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    /* Complex subtraction: componentwise difference a - b. */
    __pyx_t_float_complex diff;
    diff.real = a.real - b.real;
    diff.imag = a.imag - b.imag;
    return diff;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    /* Complex multiplication by the textbook formula
       (ar*br - ai*bi) + i(ar*bi + ai*br). */
    __pyx_t_float_complex prod;
    prod.real = a.real * b.real - a.imag * b.imag;
    prod.imag = a.real * b.imag + a.imag * b.real;
    return prod;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    /* Complex division a/b via a * conj(b) / |b|^2.  No guard against
       b == 0 and no magnitude scaling, matching the original behavior. */
    __pyx_t_float_complex quot;
    const float mag2 = b.real * b.real + b.imag * b.imag;
    quot.real = (a.real * b.real + a.imag * b.imag) / mag2;
    quot.imag = (a.imag * b.real - a.real * b.imag) / mag2;
    return quot;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
/* Complex power a**b for the float struct-fallback type.
 * Small integer exponents (0..4, negative handled by inverting a first)
 * use repeated multiplication; all other exponents go through the polar
 * form r*exp(i*theta).
 * NOTE(review): when a.imag == 0 and a.real < 0, theta stays 0 and
 * logf() is taken of a negative r, producing NaN -- preserved as-is
 * from the generated code. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex z;
    float r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            /* a**(-n) == (1/a)**n: replace a by its reciprocal. */
            float denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                /* Fixed: the original stored __Pyx_c_prodf(a, a) in z and
                 * then recomputed the same product for the return value;
                 * compute it once. */
                return __Pyx_c_prodf(a, a);
            case 3:
                z = __Pyx_c_prodf(a, a);
                return __Pyx_c_prodf(z, a);
            case 4:
                z = __Pyx_c_prodf(a, a);
                return __Pyx_c_prodf(z, z);
        }
    }
    if (a.imag == 0) {
        if (a.real == 0) {
            return a;
        }
        r = a.real;
        theta = 0;
    } else {
        r = __Pyx_c_absf(a);
        theta = atan2f(a.imag, a.real);
    }
    /* Polar form: a**b = exp(b * (ln r + i*theta)). */
    lnr = logf(r);
    z_r = expf(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cosf(z_theta);
    z.imag = z_r * sinf(z_theta);
    return z;
}
#endif
#endif
/* Build a double-complex value from real/imag parts; same three-way
 * preprocessor selection as the float variant above. */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
/* Struct fallback: fill the two fields directly. */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
#if CYTHON_CCOMPLEX
#else
/* Struct-based fallback arithmetic for double complex; mirrors the float
 * versions above, compiled only when CYTHON_CCOMPLEX is 0. */
/* Equality: both components must match exactly. */
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
/* a + b */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
/* a - b */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
/* a * b, textbook (ac - bd) + (ad + bc)i. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
/* a / b.  No guard against a zero divisor (denom == 0 yields inf/NaN). */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
/* -a */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
/* a == 0+0i ? */
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
/* Complex conjugate. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
/* |a|; hypot avoids overflow in the intermediate squares where available. */
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
/* Complex power a**b for the double struct-fallback type.
 * Small integer exponents (0..4, negative handled by inverting a first)
 * use repeated multiplication; all other exponents go through the polar
 * form r*exp(i*theta).
 * NOTE(review): when a.imag == 0 and a.real < 0, theta stays 0 and
 * log() is taken of a negative r, producing NaN -- preserved as-is
 * from the generated code. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex z;
    double r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            /* a**(-n) == (1/a)**n: replace a by its reciprocal. */
            double denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                /* Fixed: the original stored __Pyx_c_prod(a, a) in z and
                 * then recomputed the same product for the return value;
                 * compute it once. */
                return __Pyx_c_prod(a, a);
            case 3:
                z = __Pyx_c_prod(a, a);
                return __Pyx_c_prod(z, a);
            case 4:
                z = __Pyx_c_prod(a, a);
                return __Pyx_c_prod(z, z);
        }
    }
    if (a.imag == 0) {
        if (a.real == 0) {
            return a;
        }
        r = a.real;
        theta = 0;
    } else {
        r = __Pyx_c_abs(a);
        theta = atan2(a.imag, a.real);
    }
    /* Polar form: a**b = exp(b * (ln r + i*theta)). */
    lnr = log(r);
    z_r = exp(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cos(z_theta);
    z.imag = z_r * sin(z_theta);
    return z;
}
#endif
#endif
/* Convert a C int to a new Python integer object.
 * Chooses the narrowest CPython constructor whose range covers `int` for
 * this platform; the sizeof comparisons are compile-time constants, so
 * the optimizer keeps only one branch.  Falls back to
 * _PyLong_FromByteArray for platforms where no constructor fits. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) -1, const_zero = 0;
/* Generic signedness probe emitted by the code generator; for plain
 * `int` this is always false (int is signed). */
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(int) <= sizeof(unsigned long long)) {
return PyLong_FromUnsignedLongLong((unsigned long long) value);
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(long long)) {
return PyLong_FromLongLong((long long) value);
}
}
{
/* Fallback: hand the raw bytes to CPython, tagged with the native
 * endianness detected at runtime. */
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* Return 1 if the memoryview slice is contiguous over `ndim` dimensions
 * in the requested order ('F' = Fortran/column-major, anything else =
 * C/row-major), 0 otherwise.  A dimension with an indirect access
 * (suboffset >= 0) is never contiguous. */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs,
                             char order, int ndim)
{
    int i;
    int idx, dir;
    Py_ssize_t expected_stride = mvs->memview->view.itemsize;
    if (order == 'F') {
        idx = 0;
        dir = 1;
    } else {
        idx = ndim - 1;
        dir = -1;
    }
    /* Walk from the fastest-varying axis outward; each stride must equal
     * the product of the extents already visited times the item size. */
    for (i = 0; i < ndim; i++, idx += dir) {
        if (mvs->suboffsets[idx] >= 0 || mvs->strides[idx] != expected_stride)
            return 0;
        expected_stride *= mvs->shape[idx];
    }
    return 1;
}
/* Compute the [start, end) byte range spanned by a memoryview slice.
 * Negative strides move the start pointer down instead of the end
 * pointer up; any zero-extent dimension makes the range empty
 * (*out_start == *out_end). */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    char *lo, *hi;
    int dim;
    lo = hi = slice->data;
    for (dim = 0; dim < ndim; dim++) {
        Py_ssize_t stride = slice->strides[dim];
        Py_ssize_t extent = slice->shape[dim];
        if (extent == 0) {
            /* Empty slice: report a zero-length range. */
            *out_start = lo;
            *out_end = lo;
            return;
        }
        if (stride > 0)
            hi += stride * (extent - 1);
        else
            lo += stride * (extent - 1);
    }
    *out_start = lo;
    /* One past the last byte of the last element. */
    *out_end = hi + itemsize;
}
/* Two slices overlap iff their [start, end) memory extents intersect. */
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
                     __Pyx_memviewslice *slice2,
                     int ndim, size_t itemsize)
{
    void *a_lo, *a_hi, *b_lo, *b_hi;
    __pyx_get_array_memory_extents(slice1, &a_lo, &a_hi, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &b_lo, &b_hi, ndim, itemsize);
    return (a_lo < b_hi) && (b_lo < a_hi);
}
/* Allocate a fresh contiguous array with the same shape as *from_mvs and
 * copy the slice's contents into it.  On success returns a slice owning a
 * reference to the new memoryview; on failure sets a Python error and
 * returns a slice with memview == NULL and data == NULL.
 * Slices with indirect dimensions (suboffsets) cannot be copied. */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
/* Indirect (pointer-chasing) dimensions cannot be flattened into a
 * contiguous buffer. */
for (i = 0; i < ndim; i++) {
if (from_mvs->suboffsets[i] >= 0) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
/* Build the shape tuple for the new array. */
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
/* PyTuple_SET_ITEM steals the reference; clear temp_int so the
 * cleanup path does not double-decref it. */
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
/* Error path: drop the (possibly partially built) result slice. */
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
/* Temporaries are released on both paths. */
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* Wrap a raw pointer in a PyCapsule (Python >= 2.7) or a legacy PyCObject,
 * so it can be passed through Python code.  No destructor is attached. */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
/* PyCObject has no signature; `sig` is unused on this branch. */
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* Convert an arbitrary Python object to a C long.
 * Fast paths: Py2 PyInt, then PyLong (with a single-digit shortcut on
 * CPython 3 when PyLong internals are available); otherwise coerce via
 * __Pyx_PyNumber_Int and recurse.  On overflow/underflow raises
 * OverflowError and returns (long)-1 (callers must also check
 * PyErr_Occurred(), since -1 is a valid result). */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
/* Generic signedness probe; always signed for `long`. */
const long neg_one = (long) -1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
/* sizeof(long) < sizeof(long) is always false; the generator emits
 * this template for every target type. */
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
/* Single-digit PyLong fast path (reads ob_digit directly). */
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, ((PyLongObject*)x)->ob_digit[0]);
}
#endif
#endif
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(long) <= sizeof(unsigned long long)) {
__PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
/* Signed single-digit fast path, including -1 digit. */
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +(((PyLongObject*)x)->ob_digit[0]));
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
}
#endif
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong(x))
} else if (sizeof(long) <= sizeof(long long)) {
__PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong(x))
}
}
{
/* Last resort for exotic platforms: copy the PyLong's bytes straight
 * into `val` with the native endianness. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
/* Not an int at all: coerce with __Pyx_PyNumber_Int, then retry. */
long val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
/* Targets of the __PYX_VERIFY_RETURN_INT range checks above. */
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* Warn (via PyErr_WarnEx) when the Python version this module was compiled
 * against differs from the interpreter running it.  Returns 0 when the
 * versions match, or the result of PyErr_WarnEx otherwise.
 * NOTE(review): both buffers hold only "X.Y\0", and the comparison reads
 * fixed positions [0] and [2]; a two-digit minor version (e.g. "3.10")
 * is truncated -- limitation of this generated code's era. */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
/* Import a module by name and return a new reference to it, or NULL with
 * a Python error set on failure. */
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
py_name = __Pyx_PyIdentifier_FromString(name);
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
/* Import `module_name.class_name` and verify it is a type object of the
 * expected size.  With strict == 0 a larger basicsize only warns (binary
 * compatibility may still hold); any other mismatch raises ValueError.
 * Returns a new reference to the type, or NULL with an error set. */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
size_t size, int strict)
{
PyObject *py_module = 0;
PyObject *result = 0;
PyObject *py_name = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
py_module = __Pyx_ImportModule(module_name);
if (!py_module)
goto bad;
py_name = __Pyx_PyIdentifier_FromString(class_name);
if (!py_name)
goto bad;
result = PyObject_GetAttr(py_module, py_name);
Py_DECREF(py_name);
py_name = 0;
Py_DECREF(py_module);
py_module = 0;
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
/* Under the limited API tp_basicsize cannot be read directly; query the
 * __basicsize__ attribute instead. */
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if (!strict && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility",
module_name, class_name);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
else if ((size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s has the wrong size, try recompiling",
module_name, class_name);
goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(py_module);
Py_XDECREF(result);
return NULL;
}
#endif
/* Materialize the module's interned-string table: for each entry create
 * the str/bytes/unicode object described by its flags and store it at
 * *t->p.  The table is terminated by an entry with p == NULL.
 * Returns 0 on success, -1 (with a Python error set) on failure. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
/* t->n includes the terminating NUL, hence n - 1. */
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
++t;
}
return 0;
}
/* Build a Python unicode object from a NUL-terminated C string. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
/* Length-discarding convenience wrapper around
 * __Pyx_PyObject_AsStringAndSize (see below for ownership caveats). */
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
/* Return a char* view of a Python string-like object and store its length
 * in *length.  The pointer borrows from `o` (or from an internal cached
 * encoding) -- it is only valid while `o` is alive.  Returns NULL with a
 * Python error set on failure.  Handles unicode (under the ASCII/default
 * encoding build options), bytearray, and anything PyBytes_AsStringAndSize
 * accepts. */
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
char* defenc_c;
/* Cached default-encoded bytes object owned by the unicode object. */
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
/* Reject non-ASCII bytes: raise via PyUnicode_AsASCIIString. */
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
#else
if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (PyUnicode_IS_ASCII(o)) {
/* ASCII-only: the compact representation doubles as a char*. */
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
/* Non-ASCII under ASCII-only policy: raise UnicodeEncodeError. */
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
} else
#endif
#if !CYTHON_COMPILING_IN_PYPY
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
/* Truth test with a fast path: Py_True/Py_False/Py_None are answered by
 * pointer comparison; everything else defers to PyObject_IsTrue(). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True) return 1;
    if (x == Py_False || x == Py_None) return 0;
    return PyObject_IsTrue(x);
}
/* Coerce an arbitrary object to a Python integer via its nb_int (or, on
 * Python 2, nb_long) slot.  Ints pass through with a new reference.
 * Raises TypeError (and returns NULL) when the object is not number-like
 * or its __int__/__long__ returns a non-integer. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
PyNumberMethods *m;
/* Remembered slot name ("int"/"long") for the error message below. */
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (PyInt_Check(x) || PyLong_Check(x))
#else
if (PyLong_Check(x))
#endif
return Py_INCREF(x), x;
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = PyNumber_Int(x);
}
else if (m && m->nb_long) {
name = "long";
res = PyNumber_Long(x);
}
#else
if (m && m->nb_int) {
name = "int";
res = PyNumber_Long(x);
}
#endif
if (res) {
/* Guard against __int__/__long__ implementations that return a
 * non-integer object. */
#if PY_MAJOR_VERSION < 3
if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
if (!PyLong_Check(res)) {
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
name, name, Py_TYPE(res)->tp_name);
Py_DECREF(res);
return NULL;
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
/* Convert an index-like object to Py_ssize_t via __index__ semantics.
 * Fast paths for exact int/long (including a direct single-digit read on
 * CPython 3 when PyLong internals are enabled).  Returns -1 on error with
 * a Python exception set; callers must disambiguate with PyErr_Occurred(). */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b)))
return PyInt_AS_LONG(b);
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
/* Single-digit PyLong: read ob_digit directly. */
switch (Py_SIZE(b)) {
case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0];
case 0: return 0;
case 1: return ((PyLongObject*)b)->ob_digit[0];
}
#endif
#endif
return PyLong_AsSsize_t(b);
}
/* General case: call __index__ then convert the result. */
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
/* Convert a C size_t to a Python integer object. */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
TrainMTCNNprocessor.h | #ifndef _TRAIN_MTCNN_PROCESSOR_H_
#define _TRAIN_MTCNN_PROCESSOR_H_
#pragma once
#include "ZQ_CNN_Net.h"
#include <vector>
#include <string>
#include <iostream>
#include <omp.h>
#include <stdio.h>
#include "opencv2/opencv.hpp"
namespace ZQ
{
class TrainMTCNNprocessor
{
public:
/* Run the Onet classifier over every annotated face box listed in
 * `anno_file` and write one probability per box (preceded by the image
 * path) to `prob_file`.
 * anno_file line format: <img_path> {x1 y1 x2 y2}*  (4 ints per box).
 * Each box is squared up around its center, cropped (clamped to the
 * image), resized to 48x48 and fed to the network; the probability is
 * read from channel 1 of `out_blob_name`.
 * Returns false on any load/open/forward failure (closing open files
 * first). */
static bool generateWiderProb(const char* anno_file, const char* prob_file,
const char* param_file, const char* model_file, const char* out_blob_name)
{
ZQ_CNN_Net Onet;
ZQ_CNN_Tensor4D_NHW_C_Align128bit input;
if (!Onet.LoadFrom(param_file, model_file))
{
printf("failed to load Onet: %s %s\n", param_file, model_file);
return false;
}
FILE* in = 0, *out = 0;
#if defined(_WIN32)
if (0 != fopen_s(&in, anno_file, "r"))
#else
if (0 == (in = fopen(anno_file, "r")))
#endif
{
printf("failed to open %s\n", anno_file);
return false;
}
#if defined(_WIN32)
if (0 != fopen_s(&out, prob_file, "w"))
#else
if (0 == (out = fopen(prob_file, "w")))
#endif
{
printf("failed to create %s\n", prob_file);
fclose(in);
return false;
}
const int BUF_LEN = 1024 * 1024;
/* NOTE(review): malloc result is not checked before memset/use. */
char* buf = (char*)malloc(BUF_LEN);
memset(buf, 0, BUF_LEN);
int handled = 0;
while (true)
{
/* buf left empty after fgets means EOF -> done. */
buf[0] = '\0';
fgets(buf, BUF_LEN - 1, in);
if (buf[0] == '\0')
break;
int len = strlen(buf);
if (buf[len - 1] == '\n')
buf[--len] = '\0';
std::vector<std::string> splits = _split_blank(buf);
int split_num = splits.size();
/* Expect one path token plus groups of 4 coordinates. */
if (split_num % 4 != 1)
{
printf("something is wrong: %s\n", buf);
fclose(in);
fclose(out);
free(buf);
return false;
}
std::string& img_file = splits[0];
int bbox_num = split_num / 4;
cv::Mat img = cv::imread(img_file, 1);
if (img.empty())
{
printf("failed to load image %s\n", img_file.c_str());
fclose(in);
fclose(out);
free(buf);
return false;
}
int img_width = img.cols;
int img_height = img.rows;
fprintf(out, "%s", img_file.c_str());
for (int i = 0; i < bbox_num; i++)
{
int x1 = atoi(splits[i * 4 + 1].c_str());
int y1 = atoi(splits[i * 4 + 2].c_str());
int x2 = atoi(splits[i * 4 + 3].c_str());
int y2 = atoi(splits[i * 4 + 4].c_str());
int w = x2 - x1;
int h = y2 - y1;
/* Square crop of side max(w,h) centered on the box, clamped to
 * the image bounds. */
int max_side = __max(w, h);
int crop_x1 = __min(img_width - 1, __max(0, x1 + w / 2 - max_side / 2));
int crop_y1 = __min(img_height - 1, __max(0, y1 + h / 2 - max_side / 2));
int crop_x2 = __min(img_width - 1, __max(0, x1 + w / 2 + max_side / 2));
int crop_y2 = __min(img_height - 1, __max(0, y1 + h / 2 + max_side / 2));
cv::Mat crop_im = img(cv::Rect(cv::Point(crop_x1, crop_y1), cv::Point(crop_x2, crop_y2)));
if (crop_im.empty())
{
/* Degenerate crop: record probability 0 and keep going. */
fprintf(out, " 0.00");
continue;
}
cv::Mat resize_im;
cv::resize(crop_im, resize_im, cv::Size(48, 48));
input.ConvertFromBGR(resize_im.data, resize_im.cols, resize_im.rows, resize_im.step[0]);
if (!Onet.Forward(input))
{
printf("failed to forward\n");
fclose(in);
fclose(out);
free(buf);
return false;
}
const ZQ_CNN_Tensor4D* blob = Onet.GetBlobByName(std::string(out_blob_name));
if (blob == 0)
{
printf("failed to get blob %s\n", out_blob_name);
fclose(in);
fclose(out);
free(buf);
return false;
}
/* ptr[1]: second channel of the output blob's first pixel. */
const float* ptr = blob->GetFirstPixelPtr();
fprintf(out, " %.2f", ptr[1]);
}
fprintf(out, "\n");
handled++;
if (handled % 100 == 0)
{
printf("%d handled\n", handled);
}
}
free(buf);
fclose(in);
fclose(out);
return true;
}
/* Generate positive/part/negative training crops of side `size` for every
 * image listed in `anno_file` (with per-box probabilities from
 * `prob_file`), writing them under <root>/prepare_data/<size>/{positive,
 * part,negative} and appending one line per crop to pos.txt/part.txt/
 * neg.txt.  Work is distributed over `thread_num` OpenMP threads; file
 * writes and the shared counters are serialized in an omp critical
 * section.  Returns false if loading the lists or any per-image
 * generation fails. */
static bool generate_data(int size, const char* root, const char* anno_file, const char* prob_file,
int base_num = 1, int thread_num = 4, float prob_thresh = 0.3)
{
const int BUF_LEN = 1000;
char save_dir[BUF_LEN] = { 0 };
char pos_save_dir[BUF_LEN] = { 0 };
char part_save_dir[BUF_LEN] = { 0 };
char neg_save_dir[BUF_LEN] = { 0 };
/* Avoid a double slash when root already ends with a separator. */
int len = strlen(root);
if (root[len - 1] == '/' || root[len - 1] == '\\')
{
#if defined(_WIN32)
sprintf_s(save_dir, BUF_LEN - 1, "%sprepare_data/%d", root, size);
#else
sprintf(save_dir, "%sprepare_data/%d", root, size);
#endif
}
else
{
#if defined(_WIN32)
sprintf_s(save_dir, BUF_LEN - 1, "%s/prepare_data/%d", root, size);
#else
sprintf(save_dir, "%s/prepare_data/%d", root, size);
#endif
}
#if defined(_WIN32)
sprintf_s(pos_save_dir, BUF_LEN - 1, "%s/positive", save_dir);
sprintf_s(part_save_dir, BUF_LEN - 1, "%s/part", save_dir);
sprintf_s(neg_save_dir, BUF_LEN - 1, "%s/negative", save_dir);
#else
sprintf(pos_save_dir, "%s/positive", save_dir);
sprintf(part_save_dir, "%s/part", save_dir);
sprintf(neg_save_dir, "%s/negative", save_dir);
#endif
std::string pos_file = std::string(save_dir) + "/pos.txt";
std::string part_file = std::string(save_dir) + "/part.txt";
std::string neg_file = std::string(save_dir) + "/neg.txt";
std::vector<std::string> image_files;
std::vector<std::vector<float> > all_boxes;
std::vector<std::vector<float> > all_probs;
if (!_load_anno_and_prob(anno_file, prob_file, image_files, all_boxes, all_probs))
{
printf("failed to load anno and prob file\n");
return false;
}
FILE* out_pos = 0, *out_part = 0, *out_neg = 0;
#if defined(_WIN32)
if (0 != fopen_s(&out_pos, pos_file.c_str(), "w"))
#else
if (0 == (out_pos = fopen(pos_file.c_str(), "w")))
#endif
{
printf("failed to create file %s\n", pos_file.c_str());
return false;
}
#if defined(_WIN32)
if (0 != fopen_s(&out_part, part_file.c_str(), "w"))
#else
if (0 == (out_part = fopen(part_file.c_str(), "w")))
#endif
{
printf("failed to create file %s\n", part_file.c_str());
fclose(out_pos);
return false;
}
#if defined(_WIN32)
if (0 != fopen_s(&out_neg, neg_file.c_str(), "w"))
#else
if (0 == (out_neg = fopen(neg_file.c_str(), "w")))
#endif
{
printf("failed to create file %s\n", neg_file.c_str());
fclose(out_pos);
fclose(out_part);
return false;
}
int image_num = image_files.size();
/* One-element arrays so the omp-parallel region can share/mutate them. */
int handled[1] = { 0 };
bool ret[1] = { true };
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,10)
for (int i = 0; i < image_num; i++)
{
std::vector<std::string> pos_names, part_names, neg_names;
bool flag = true;
/* Once any image fails, remaining iterations skip the work. */
if (ret[0])
{
flag = _generate_data_for_one_image(i, size, image_files[i], all_boxes[i],
all_probs[i], prob_thresh,
pos_save_dir, part_save_dir, neg_save_dir,
base_num, pos_names, part_names, neg_names);
}
/* Serialize all list-file writes and counter updates. */
#pragma omp critical
{
if (!flag)
ret[0] = false;
for (int j = 0; j < pos_names.size(); j++)
{
fprintf(out_pos, "%s\n", pos_names[j].c_str());
}
for (int j = 0; j < part_names.size(); j++)
{
fprintf(out_part, "%s\n", part_names[j].c_str());
}
for (int j = 0; j < neg_names.size(); j++)
{
fprintf(out_neg, "%s\n", neg_names[j].c_str());
}
handled[0]++;
if (handled[0] % 100 == 0)
{
printf("%d handled\n", handled[0]);
}
}
}
fclose(out_pos);
fclose(out_part);
fclose(out_neg);
return ret[0];
}
/* Generate landmark-regression training crops of side `size` from the
 * CelebA dataset: for every image with a bbox line in `celeba_bbox_file`
 * and a matching 10-value landmark line in `celeba_landmark_file`, write
 * crops under <root>/prepare_data/<size>/landmark and append one line per
 * crop to landmark.txt.  Parallelized like generate_data(), with writes
 * serialized in an omp critical section.  Returns false on load/open or
 * per-image failure. */
static bool generate_landmark(int size, const char* root, const char* celeba_img_fold,
const char* celeba_bbox_file, const char* celeba_landmark_file,
int base_num = 1, int thread_num = 4)
{
const int BUF_LEN = 1000;
char save_dir[BUF_LEN] = { 0 };
char landmark_save_dir[BUF_LEN] = { 0 };
std::string celeba_img_root;
#if defined(_WIN32)
strcpy_s(save_dir, BUF_LEN - 1, celeba_img_fold);
#else
strcpy(save_dir, celeba_img_fold);
#endif
/* Strip a trailing separator from the CelebA image folder. */
int len = strlen(save_dir);
if (save_dir[len - 1] == '/' || save_dir[len - 1] == '\\')
save_dir[--len] = '\0';
celeba_img_root = save_dir;
/* Build <root>/prepare_data/<size>, avoiding a double slash. */
len = strlen(root);
if (root[len - 1] == '/' || root[len - 1] == '\\')
{
#if defined(_WIN32)
sprintf_s(save_dir, BUF_LEN - 1, "%sprepare_data/%d", root, size);
#else
sprintf(save_dir, "%sprepare_data/%d", root, size);
#endif
}
else
{
#if defined(_WIN32)
sprintf_s(save_dir, BUF_LEN - 1, "%s/prepare_data/%d", root, size);
#else
sprintf(save_dir, "%s/prepare_data/%d", root, size);
#endif
}
#if defined(_WIN32)
sprintf_s(landmark_save_dir, BUF_LEN - 1, "%s/landmark", save_dir);
#else
sprintf(landmark_save_dir, "%s/landmark", save_dir);
#endif
std::string landmark_file = std::string(save_dir) + "/landmark.txt";
std::vector<std::string> image_files;
std::vector<std::vector<float> > all_boxes;
std::vector<std::vector<float> > all_landmarks;
if (!_load_celeba_bbox_and_landmarks(celeba_bbox_file, celeba_landmark_file, image_files, all_boxes, all_landmarks))
{
printf("failed to load bbox and landmark file\n");
return false;
}
FILE* out = 0;
#if defined(_WIN32)
if (0 != fopen_s(&out, landmark_file.c_str(), "w"))
#else
if (0 == (out = fopen(landmark_file.c_str(), "w")))
#endif
{
printf("failed to create file %s\n", landmark_file.c_str());
return false;
}
int image_num = image_files.size();
/* One-element arrays so the omp-parallel region can share/mutate them. */
int handled[1] = { 0 };
bool ret[1] = { true };
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,10)
for (int i = 0; i < image_num; i++)
{
std::vector<std::string> landmark_names;
bool flag = true;
/* Once any image fails, remaining iterations skip the work. */
if (ret[0])
{
std::string img_file = celeba_img_root + "/" + image_files[i];
flag = _generate_landmark_for_one_image(i, size, img_file, all_boxes[i],
all_landmarks[i], landmark_save_dir, base_num, landmark_names);
}
#pragma omp critical
{
if (!flag)
ret[0] = false;
for (int j = 0; j < landmark_names.size(); j++)
{
fprintf(out, "%s\n", landmark_names[j].c_str());
}
handled[0]++;
if (handled[0] % 100 == 0)
{
printf("%d handled\n", handled[0]);
}
}
}
fclose(out);
return ret[0];
}
public:
/* True for the separators recognized by _split_blank: space, tab, newline. */
static bool _is_blank_c(char c)
{
    switch (c)
    {
    case ' ':
    case '\t':
    case '\n':
        return true;
    default:
        return false;
    }
}
/* Split a NUL-terminated string on runs of blanks (space/tab/newline)
 * and return the non-empty tokens in their original order. */
static std::vector<std::string> _split_blank(const char* str)
{
    std::vector<std::string> tokens;
    const int len = (int)strlen(str);
    int pos = 0;
    while (pos < len)
    {
        /* Skip the separators before the next token. */
        while (pos < len && _is_blank_c(str[pos]))
            pos++;
        /* Consume the token itself. */
        int start = pos;
        while (pos < len && !_is_blank_c(str[pos]))
            pos++;
        if (pos > start)
            tokens.push_back(std::string(str + start, str + pos));
    }
    return tokens;
}
/* Return the maximum overlap between cur_box [x1,y1,x2,y2] and every box
 * in `all_boxes` (flat array, 4 floats per box).
 * mode "Union": intersection / union (standard IoU);
 * mode "Min":   intersection / smaller box area.
 * Intersection sides use the inclusive-pixel +1 convention; note the box
 * areas themselves are computed without the +1. */
static float _IOU(const float cur_box[4], const std::vector<float>& all_boxes, const std::string mode = "Union")
{
int box_num = all_boxes.size() / 4;
//the iou
float max_iou = 0;
float area1 = (cur_box[2] - cur_box[0])*(cur_box[3] - cur_box[1]);
for (int i = 0; i < box_num; i++)
{
/* Intersection rectangle corners (__max/__min are MSVC-style macros
 * assumed to be defined by an included header). */
float maxY = __max(cur_box[1], all_boxes[i * 4 + 1]);
float maxX = __max(cur_box[0], all_boxes[i * 4 + 0]);
float minY = __min(cur_box[3], all_boxes[i * 4 + 3]);
float minX = __min(cur_box[2], all_boxes[i * 4 + 2]);
//maxX1 and maxY1 reuse
maxX = __max(minX - maxX + 1, 0);
maxY = __max(minY - maxY + 1, 0);
//IOU reuse for the area of two bbox
float IOU = maxX * maxY;
float area2 = (all_boxes[i * 4 + 2] - all_boxes[i * 4])
*(all_boxes[i * 4 + 3] - all_boxes[i * 4 + 1]);
if (!mode.compare("Union"))
IOU = IOU / (area1 + area2 - IOU);
else if (!mode.compare("Min"))
{
IOU = IOU / __min(area1, area2);
}
max_iou = __max(max_iou, IOU);
}
return max_iou;
}
/* Pseudo-random integer in [low, high) using rand(); returns `low`
 * unchanged when the range is empty (high <= low). */
static int _randint(int low, int high)
{
    if (high <= low)
        return low;
    return low + rand() % (high - low);
}
/* Parse the annotation and probability files in lockstep.
 * anno_file line: <img_path> {x1 y1 x2 y2}*      (4 ints per box)
 * prob_file line: <img_path> {prob}*             (one float per box)
 * Fills image_files, all_boxes (4 floats/box) and all_probs (1 float/box);
 * returns false on open failure or a malformed / mismatched line pair. */
static bool _load_anno_and_prob(const char* anno_file, const char* prob_file,
std::vector<std::string>& image_files, std::vector<std::vector<float> >& all_boxes,
std::vector<std::vector<float> >& all_probs)
{
image_files.clear();
all_boxes.clear();
all_probs.clear();
FILE* in = 0, *in2 = 0;
#if defined(_WIN32)
if (0 != fopen_s(&in, anno_file, "r"))
#else
if (0 == (in = fopen(anno_file, "r")))
#endif
{
printf("failed to open %s\n", anno_file);
return false;
}
#if defined(_WIN32)
if (0 != fopen_s(&in2, prob_file, "r"))
#else
if (0 == (in2 = fopen(prob_file, "r")))
#endif
{
printf("failed to open %s\n", prob_file);
fclose(in);
return false;
}
const int BUF_LEN = 1024 * 1024;
/* NOTE(review): malloc results are not checked before memset/use. */
char* buf = (char*)malloc(BUF_LEN);
char* buf2 = (char*)malloc(BUF_LEN);
memset(buf, 0, BUF_LEN);
memset(buf2, 0, BUF_LEN);
int handled[1] = { 0 };
while (true)
{
/* Read one line from each file; an empty anno line means EOF. */
buf[0] = '\0';
buf2[0] = '\0';
fgets(buf, BUF_LEN - 1, in);
fgets(buf2, BUF_LEN - 1, in2);
if (buf[0] == '\0')
break;
int len = strlen(buf);
if (buf[len - 1] == '\n')
buf[--len] = '\0';
int len2 = strlen(buf2);
if (buf2[len2 - 1] == '\n')
buf2[--len2] = '\0';
std::vector<std::string> splits = _split_blank(buf);
std::vector<std::string> splits2 = _split_blank(buf2);
int split_num = splits.size();
int split_num2 = splits2.size();
/* One path token plus groups of 4 coordinates. */
if (split_num % 4 != 1)
{
printf("something is wrong: %s\n", buf);
fclose(in);
fclose(in2);
free(buf);
free(buf2);
return false;
}
std::string& img_file = splits[0];
int bbox_num = split_num / 4;
/* The prob line must carry exactly one value per box. */
if (split_num2 != bbox_num + 1)
{
printf("something is wrong: %s, %s\n", buf, buf2);
fclose(in);
fclose(in2);
free(buf);
free(buf2);
return false;
}
image_files.push_back(img_file);
std::vector<float> boxes(bbox_num * 4);
std::vector<float> probs(bbox_num);
for (int i = 0; i < bbox_num; i++)
{
boxes[i * 4] = atoi(splits[i * 4 + 1].c_str());
boxes[i * 4 + 1] = atoi(splits[i * 4 + 2].c_str());
boxes[i * 4 + 2] = atoi(splits[i * 4 + 3].c_str());
boxes[i * 4 + 3] = atoi(splits[i * 4 + 4].c_str());
probs[i] = atof(splits2[i + 1].c_str());
}
all_boxes.push_back(boxes);
all_probs.push_back(probs);
}
fclose(in);
fclose(in2);
free(buf);
free(buf2);
return true;
}
/* Parse CelebA-style bbox and landmark list files in lockstep.
 * The bbox file starts with a count line and a header line (both skipped);
 * each data line is <img_name> {x1 y1 x2 y2}* and the matching landmark
 * line carries 10 floats per box.  Fills image_files, all_boxes
 * (4 floats/box) and all_landmarks (10 floats/box); returns false on open
 * failure or a malformed / mismatched line pair. */
static bool _load_celeba_bbox_and_landmarks(const char* celeba_bbox_file,
const char* celeba_landmark_file, std::vector<std::string>& image_files,
std::vector<std::vector<float> >& all_boxes,
std::vector<std::vector<float> >& all_landmarks)
{
image_files.clear();
all_boxes.clear();
all_landmarks.clear();
FILE* in = 0, *in2 = 0;
#if defined(_WIN32)
if (0 != fopen_s(&in, celeba_bbox_file, "r"))
#else
if (0 == (in = fopen(celeba_bbox_file, "r")))
#endif
{
printf("failed to open %s\n", celeba_bbox_file);
return false;
}
#if defined(_WIN32)
if (0 != fopen_s(&in2, celeba_landmark_file, "r"))
#else
if (0 == (in2 = fopen(celeba_landmark_file, "r")))
#endif
{
printf("failed to open %s\n", celeba_landmark_file);
fclose(in);
return false;
}
int line_id = 0;
int image_num = 0;
const int BUF_LEN = 1024 * 1024;
/* NOTE(review): malloc results are not checked before memset/use. */
char* buf = (char*)malloc(BUF_LEN);
char* buf2 = (char*)malloc(BUF_LEN);
memset(buf, 0, BUF_LEN);
memset(buf2, 0, BUF_LEN);
int handled[1] = { 0 };
while (true)
{
buf[0] = '\0';
buf2[0] = '\0';
fgets(buf, BUF_LEN - 1, in);
fgets(buf2, BUF_LEN - 1, in2);
if (buf[0] == '\0')
break;
/* First line holds the image count; it and the following header
 * line are skipped. */
if (line_id == 0)
{
#if defined(_WIN32)
sscanf_s(buf, "%d", &image_num);
#else
sscanf(buf, "%d", &image_num);
#endif
}
if (line_id <= 1)
{
line_id++;
continue;
}
/* Stop when the declared image count is reached.
 * NOTE(review): `line_id + 2 >= image_num` with the two header lines
 * looks like it stops a few entries early or late depending on the
 * intended accounting -- verify against the CelebA list format. */
if (line_id + 2 >= image_num)
break;
line_id++;
int len = strlen(buf);
if (buf[len - 1] == '\n')
buf[--len] = '\0';
int len2 = strlen(buf2);
if (buf2[len2 - 1] == '\n')
buf2[--len2] = '\0';
std::vector<std::string> splits = _split_blank(buf);
std::vector<std::string> splits2 = _split_blank(buf2);
int split_num = splits.size();
int split_num2 = splits2.size();
/* One name token plus groups of 4 coordinates. */
if (split_num % 4 != 1)
{
printf("something is wrong: %s\n", buf);
fclose(in);
fclose(in2);
free(buf);
free(buf2);
return false;
}
std::string& img_file = splits[0];
int bbox_num = split_num / 4;
/* The landmark line must carry exactly 10 values per box. */
if (split_num2 != bbox_num*10 + 1)
{
printf("something is wrong: %s, %s\n", buf, buf2);
fclose(in);
fclose(in2);
free(buf);
free(buf2);
return false;
}
image_files.push_back(img_file);
std::vector<float> boxes(bbox_num * 4);
std::vector<float> landmarks(bbox_num * 10);
for (int i = 0; i < bbox_num; i++)
{
for (int j = 0; j < 4; j++)
boxes[i * 4 + j] = atoi(splits[i * 4 + j + 1].c_str());
for (int j = 0; j < 10; j++)
landmarks[i * 10 + j] = atof(splits2[i * 10 + j + 1].c_str());
}
all_boxes.push_back(boxes);
all_landmarks.push_back(landmarks);
}
fclose(in);
fclose(in2);
free(buf);
free(buf2);
return true;
}
static bool _generate_data_for_one_image(int idx, int size, const std::string& image_file,
const std::vector<float>& boxes, const std::vector<float>& probs, float prob_thresh,
const std::string& pos_save_dir, const std::string& part_save_dir,
const std::string& neg_save_dir, int base_num, std::vector<std::string>& pos_names,
std::vector<std::string>& part_names, std::vector<std::string>& neg_names)
{
const int BUF_LEN = 500;
char tmp_buf[BUF_LEN];
std::string file_name, line;
pos_names.clear();
part_names.clear();
neg_names.clear();
int box_num = boxes.size() / 4;
cv::Mat img = cv::imread(image_file, 1);
if (img.empty())
{
printf("failed to load image %s\n", image_file.c_str());
return false;
}
int width = img.cols;
int height = img.rows;
int min_size = __min(width, height) / 2;
if (min_size <= size)
{
return true;
}
cv::Mat resized_im, brighter_im, darker_im;
// neg
int neg_num = 0, pos_num = 0, part_num = 0;
while (neg_num < base_num * 50)
{
int cur_size = _randint(size, min_size);
int nx = rand() % (width - cur_size);
int ny = rand() % (height - cur_size);
float crop_box[4] = { nx, ny, nx + cur_size, ny + cur_size };
float iou = _IOU(crop_box, boxes);
cv::Mat cropped_im = img(cv::Rect(cv::Point(crop_box[0], crop_box[1]), cv::Point(crop_box[2], crop_box[3])));
if (cropped_im.empty())
continue;
if (iou < 0.3)
{
cv::resize(cropped_im, resized_im, cv::Size(size, size));
resized_im.convertTo(brighter_im, resized_im.type(), 1.25);
resized_im.convertTo(darker_im, resized_im.type(), 0.8);
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, neg_num);
#endif
file_name = neg_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, resized_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d 0", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d 0", idx, neg_num);
#endif
line = neg_save_dir + "/" + std::string(tmp_buf);
neg_names.push_back(line);
neg_num++;
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, neg_num);
#endif
file_name = neg_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, brighter_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d 0", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d 0", idx, neg_num);
#endif
line = neg_save_dir + "/" + std::string(tmp_buf);
neg_names.push_back(line);
neg_num++;
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, neg_num);
#endif
file_name = neg_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, darker_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d 0", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d 0", idx, neg_num);
#endif
line = neg_save_dir + "/" + std::string(tmp_buf);
neg_names.push_back(line);
neg_num++;
}
}
for (int bb = 0; bb < box_num; bb++)
{
int x1 = boxes[bb * 4];
int y1 = boxes[bb * 4 + 1];
int x2 = boxes[bb * 4 + 2];
int y2 = boxes[bb * 4 + 3];
int w = x2 - x1 + 1;
int h = y2 - y1 + 1;
if (__max(w, h) < 40 || x1 < 0 || y1 < 0 || probs[bb] < prob_thresh)
continue;
//neg
for (int i = 0; i < base_num * 2; i++)
{
int cur_size = _randint(size, min_size);
int delta_x = _randint(__max(-cur_size, -x1), w);
int delta_y = _randint(__max(-cur_size, -y1), h);
int nx1 = __max(0, x1 + delta_x);
int ny1 = __max(0, y1 + delta_y);
if (nx1 + cur_size > width || ny1 + cur_size > height)
continue;
float crop_box[4] = { nx1, ny1, nx1 + cur_size, ny1 + cur_size };
float iou = _IOU(crop_box, boxes);
cv::Mat cropped_im = img(cv::Rect(cv::Point(crop_box[0], crop_box[1]), cv::Point(crop_box[2], crop_box[3])));
if (cropped_im.empty())
continue;
if (iou < 0.3)
{
cv::resize(cropped_im, resized_im, cv::Size(size, size));
resized_im.convertTo(brighter_im, resized_im.type(), 1.25);
resized_im.convertTo(darker_im, resized_im.type(), 0.8);
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, neg_num);
#endif
file_name = neg_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, resized_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d 0", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d 0", idx, neg_num);
#endif
line = neg_save_dir + "/" + std::string(tmp_buf);
neg_names.push_back(line);
neg_num++;
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, neg_num);
#endif
file_name = neg_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, brighter_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d 0", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d 0", idx, neg_num);
#endif
line = neg_save_dir + "/" + std::string(tmp_buf);
neg_names.push_back(line);
neg_num++;
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, neg_num);
#endif
file_name = neg_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, darker_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d 0", idx, neg_num);
#else
sprintf(tmp_buf, "%d_%d 0", idx, neg_num);
#endif
line = neg_save_dir + "/" + std::string(tmp_buf);
neg_names.push_back(line);
neg_num++;
}
}
//pos & part
for (int i = 0; i < base_num * 8; i++)
{
int cur_size = _randint(__min(w, h) * 0.8, ceil(1.25 * __max(w, h)));
int delta_x = _randint(-w * 0.2, w * 0.2);
int delta_y = _randint(-h * 0.2, h * 0.2);
int nx1 = int(__max(x1 + w / 2 + delta_x - cur_size / 2, 0));
int ny1 = int(__max(y1 + h / 2 + delta_y - cur_size / 2, 0));
int nx2 = nx1 + cur_size;
int ny2 = ny1 + cur_size;
if (nx2 > width || ny2 > height)
continue;
float crop_box[4] = { nx1, ny1, nx1 + cur_size, ny1 + cur_size };
float iou = _IOU(crop_box, boxes);
cv::Mat cropped_im = img(cv::Rect(cv::Point(crop_box[0], crop_box[1]), cv::Point(crop_box[2], crop_box[3])));
if (cropped_im.empty())
continue;
float offset_x1 = (x1 - nx1) / float(cur_size);
float offset_y1 = (y1 - ny1) / float(cur_size);
float offset_x2 = (x2 - nx2) / float(cur_size);
float offset_y2 = (y2 - ny2) / float(cur_size);
if (iou >= 0.65)
{
cv::resize(cropped_im, resized_im, cv::Size(size, size));
resized_im.convertTo(brighter_im, resized_im.type(), 1.25);
resized_im.convertTo(darker_im, resized_im.type(), 0.8);
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, pos_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, pos_num);
#endif
file_name = pos_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, resized_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d 1 %.2f %.2f %.2f %.2f", idx, pos_num,
#else
sprintf(tmp_buf, "%d_%d 1 %.2f %.2f %.2f %.2f", idx, pos_num,
#endif
offset_x1, offset_y1, offset_x2, offset_y2);
line = pos_save_dir + "/" + std::string(tmp_buf);
pos_names.push_back(line);
pos_num++;
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, pos_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, pos_num);
#endif
file_name = pos_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, brighter_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d 1 %.2f %.2f %.2f %.2f", idx, pos_num,
#else
sprintf(tmp_buf, "%d_%d 1 %.2f %.2f %.2f %.2f", idx, pos_num,
#endif
offset_x1, offset_y1, offset_x2, offset_y2);
line = pos_save_dir + "/" + std::string(tmp_buf);
pos_names.push_back(line);
pos_num++;
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, pos_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, pos_num);
#endif
file_name = pos_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, darker_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d 1 %.2f %.2f %.2f %.2f", idx, pos_num,
#else
sprintf(tmp_buf, "%d_%d 1 %.2f %.2f %.2f %.2f", idx, pos_num,
#endif
offset_x1, offset_y1, offset_x2, offset_y2);
line = pos_save_dir + "/" + std::string(tmp_buf);
pos_names.push_back(line);
pos_num++;
}
else if (iou >= 0.4)
{
if (rand() % 100 <= 40)
{
cv::resize(cropped_im, resized_im, cv::Size(size, size));
resized_im.convertTo(brighter_im, resized_im.type(), 1.25);
resized_im.convertTo(darker_im, resized_im.type(), 0.8);
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, part_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, part_num);
#endif
file_name = part_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, resized_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d -1 %.2f %.2f %.2f %.2f", idx, part_num,
#else
sprintf(tmp_buf, "%d_%d -1 %.2f %.2f %.2f %.2f", idx, part_num,
#endif
offset_x1, offset_y1, offset_x2, offset_y2);
line = part_save_dir + "/" + std::string(tmp_buf);
part_names.push_back(line);
part_num++;
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, part_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, part_num);
#endif
file_name = part_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, brighter_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d -1 %.2f %.2f %.2f %.2f", idx, part_num,
#else
sprintf(tmp_buf, "%d_%d -1 %.2f %.2f %.2f %.2f", idx, part_num,
#endif
offset_x1, offset_y1, offset_x2, offset_y2);
line = part_save_dir + "/" + std::string(tmp_buf);
part_names.push_back(line);
part_num++;
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d.jpg", idx, part_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, part_num);
#endif
file_name = part_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, darker_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN-1, "%d_%d -1 %.2f %.2f %.2f %.2f", idx, part_num,
#else
sprintf(tmp_buf, "%d_%d -1 %.2f %.2f %.2f %.2f", idx, part_num,
#endif
offset_x1, offset_y1, offset_x2, offset_y2);
line = part_save_dir + "/" + std::string(tmp_buf);
part_names.push_back(line);
part_num++;
}
}
}
}
return true;
}
static bool _generate_landmark_for_one_image(int idx, int size, const std::string& image_file,
const std::vector<float>& boxes, const std::vector<float>& landmarks,
const std::string& landmark_save_dir, int base_num, std::vector<std::string>& landmark_names)
{
const int BUF_LEN = 500;
char tmp_buf[BUF_LEN];
std::string file_name, line;
landmark_names.clear();
int box_num = boxes.size() / 4;
cv::Mat img = cv::imread(image_file, 1);
if (img.empty())
{
printf("failed to load image %s\n", image_file.c_str());
return false;
}
int width = img.cols;
int height = img.rows;
int min_size = __min(width, height) / 2;
if (min_size <= size)
{
return true;
}
cv::Mat rot_im;
cv::Mat resized_im, brighter_im, darker_im;
double angles[13] = { 0,-15,-30,-45,-60,-75,-90,15,30,45,60,75,90 };
int angle_num = 13;
float rot_landmark[10];
int landmark_num = 0;
for (int bb = 0; bb < box_num; bb++)
{
int x1 = boxes[bb * 4];
int y1 = boxes[bb * 4 + 1];
int w = boxes[bb * 4 + 2];
int h = boxes[bb * 4 + 3];
if (__max(w, h) < 40 || x1 < 0 || y1 < 0)
continue;
float cx = landmarks[bb * 10 + 4];
float cy = landmarks[bb * 10 + 5];
for (int aa = 0; aa < angle_num; aa++)
{
cv::Mat rotM = cv::getRotationMatrix2D(cv::Point2f(cx, cy), angles[aa], 1);
for (int i = 0; i < 5; i++)
{
rot_landmark[i * 2 + 0] = rotM.at<double>(0, 0)*landmarks[bb * 10 + i * 2]
+ rotM.at<double>(0, 1)*landmarks[bb * 10 + i * 2 + 1]
+ rotM.at<double>(0, 2);
rot_landmark[i * 2 + 1] = rotM.at<double>(1, 0)*landmarks[bb * 10 + i * 2]
+ rotM.at<double>(1, 1)*landmarks[bb * 10 + i * 2 + 1]
+ rotM.at<double>(1, 2);
}
cv::warpAffine(img, rot_im, rotM, cv::Size(width, height));
for (int i = 0; i < base_num; i++)
{
int cur_size = _randint(__min(w, h) * 0.8, ceil(1.25 * __max(w, h)));
int delta_x = _randint(-w * 0.15, w * 0.15);
int delta_y = _randint(-h * 0.15, h * 0.15);
int nx1 = int(__max(x1 + w / 2 + delta_x - cur_size / 2, 0));
int ny1 = int(__max(y1 + h / 2 + delta_y - cur_size / 2, 0));
int nx2 = nx1 + cur_size;
int ny2 = ny1 + cur_size;
if (nx2 > width || ny2 > height)
continue;
float crop_box[4] = { nx1, ny1, nx1 + cur_size, ny1 + cur_size };
cv::Mat cropped_im = img(cv::Rect(cv::Point(crop_box[0], crop_box[1]), cv::Point(crop_box[2], crop_box[3])));
if (cropped_im.empty())
continue;
float offset_x1 = (rot_landmark[0] - nx1 + 0.5) / float(cur_size);
float offset_y1 = (rot_landmark[1] - ny1 + 0.5) / float(cur_size);
float offset_x2 = (rot_landmark[2] - nx1 + 0.5) / float(cur_size);
float offset_y2 = (rot_landmark[3] - ny1 + 0.5) / float(cur_size);
float offset_x3 = (rot_landmark[4] - nx1 + 0.5) / float(cur_size);
float offset_y3 = (rot_landmark[5] - ny1 + 0.5) / float(cur_size);
float offset_x4 = (rot_landmark[6] - nx1 + 0.5) / float(cur_size);
float offset_y4 = (rot_landmark[7] - ny1 + 0.5) / float(cur_size);
float offset_x5 = (rot_landmark[8] - nx1 + 0.5) / float(cur_size);
float offset_y5 = (rot_landmark[9] - ny1 + 0.5) / float(cur_size);
cv::resize(cropped_im, resized_im, cv::Size(size, size));
resized_im.convertTo(brighter_im, resized_im.type(), 1.25);
resized_im.convertTo(darker_im, resized_im.type(), 0.8);
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN - 1, "%d_%d.jpg", idx, landmark_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, landmark_num);
#endif
file_name = landmark_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, resized_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN - 1, "%d_%d -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f",
#else
sprintf(tmp_buf, "%d_%d -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f",
#endif
idx, landmark_num,
offset_x1, offset_x2, offset_x3, offset_x4, offset_x5,
offset_y1, offset_y2, offset_y3, offset_y4, offset_y5);
line = landmark_save_dir + "/" + std::string(tmp_buf);
landmark_names.push_back(line);
landmark_num++;
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN - 1, "%d_%d.jpg", idx, landmark_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, landmark_num);
#endif
file_name = landmark_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, brighter_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN - 1, "%d_%d -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f",
#else
sprintf(tmp_buf, "%d_%d -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f",
#endif
idx, landmark_num,
offset_x1, offset_x2, offset_x3, offset_x4, offset_x5,
offset_y1, offset_y2, offset_y3, offset_y4, offset_y5);
line = landmark_save_dir + "/" + std::string(tmp_buf);
landmark_names.push_back(line);
landmark_num++;
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN - 1, "%d_%d.jpg", idx, landmark_num);
#else
sprintf(tmp_buf, "%d_%d.jpg", idx, landmark_num);
#endif
file_name = landmark_save_dir + "/" + std::string(tmp_buf);
if (!cv::imwrite(file_name, darker_im))
{
printf("failed to write image %s\n", file_name.c_str());
return false;
}
#if defined(_WIN32)
sprintf_s(tmp_buf, BUF_LEN - 1, "%d_%d -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f",
#else
sprintf(tmp_buf, "%d_%d -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f",
#endif
idx, landmark_num,
offset_x1, offset_x2, offset_x3, offset_x4, offset_x5,
offset_y1, offset_y2, offset_y3, offset_y4, offset_y5);
line = landmark_save_dir + "/" + std::string(tmp_buf);
landmark_names.push_back(line);
landmark_num++;
}
}
}
return true;
}
};
}
#endif
|
GB_unaryop__minv_int16_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int16_uint8
// op(A') function: GB_tran__minv_int16_uint8
// C type: int16_t
// A type: uint8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise apply: Cx [p] = GB_IMINV_SIGNED ((int16_t) Ax [p], 16) for
// p = 0..anz-1, parallelized statically over nthreads OpenMP threads.
// Returns GrB_NO_VALUE if this operator/type combination is disabled at
// compile time (GB_DISABLE), otherwise GrB_SUCCESS.
GrB_Info GB_unop__minv_int16_uint8
(
int16_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = GB_IMINV_SIGNED ((int16_t) Ax [p], 16), via the macros above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): transpose A, typecast uint8_t -> int16_t, and apply
// the MINV operator.  The loop body comes from GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above; this runs phase 2 of the
// two-phase transpose (Rowcounts/Iter/A_slice come from phase 1).
GrB_Info GB_tran__minv_int16_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y for two struct timeval values.
 * Returns 1 when the difference is negative, 0 otherwise.
 * NOTE: *y is normalized in place (as in the classic glibc example),
 * so callers must not rely on y being unchanged. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow one second (or more) from y so that y->tv_usec <= x->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry whole seconds out of the microsecond difference if it exceeds 1s. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the microsecond part is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 16;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,4);t1++) {
lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-3,4)),ceild(8*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(4*t1+Ny+5,16)),floord(8*t2+Ny+4,16)),floord(8*t1-8*t2+Nz+Ny+3,16));t3++) {
for (t4=max(max(max(0,ceild(t1-31,32)),ceild(8*t2-Nz-124,128)),ceild(16*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(4*t1+Nx+5,128)),floord(8*t2+Nx+4,128)),floord(16*t3+Nx+12,128)),floord(8*t1-8*t2+Nz+Nx+3,128));t4++) {
for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),16*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),16*t3+14),128*t4+126),8*t1-8*t2+Nz+5);t5++) {
for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
lbv=max(128*t4,t5+1);
ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_unop__identity_fp32_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_int16)
// op(A') function: GB (_unop_tran__identity_fp32_int16)
// C type: float
// A type: int16_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise apply: Cx [p] = (float) Ax [p] for p = 0..anz-1.  Cx and Ax may
// be aliased.  Two paths: Ab == NULL processes all anz entries; otherwise Ab
// is the bitmap (A->b) and only entries with Ab [p] != 0 are converted.
// Returns GrB_NO_VALUE if disabled at compile time, else GrB_SUCCESS.
GrB_Info GB (_unop_apply__identity_fp32_int16)
(
float *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (float) A': transpose A and typecast int16_t -> float (identity op).
// The loop body comes from GB_unop_transpose.c, specialized by the GB_*
// macros defined above; Workspaces/A_slice partition the work over
// nworkspaces/nthreads.
GrB_Info GB (_unop_tran__identity_fp32_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__identity_int8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_int32
// op(A') function: GB_tran__identity_int8_int32
// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise apply: Cx [p] = (int8_t) Ax [p] for p = 0..anz-1 (identity op,
// typecast int32_t -> int8_t), parallelized statically over nthreads.
// Returns GrB_NO_VALUE if this operator/type combination is disabled at
// compile time (GB_DISABLE), otherwise GrB_SUCCESS.
GrB_Info GB_unop__identity_int8_int32
(
int8_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = (int8_t) Ax [p], via the macros defined above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int8_t) A': transpose A, typecast int32_t -> int8_t, and apply the
// IDENTITY operator.  The implementation is the shared transpose template
// GB_unaryop_transpose.c, specialized via the GB_* macros defined above;
// this is phase 2 of the two-phase transpose (phase 1 computed Rowcounts).
GrB_Info GB_tran__identity_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // per-task row counts from phase 1
    GBI_single_iterator Iter,       // iterator over the vectors of A
    const int64_t *restrict A_slice,// how A is sliced across tasks
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
utils.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
#pragma once
#include <fcntl.h>
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <memory>
#include <random>
#include <set>
#ifdef __APPLE__
#else
#include <malloc.h>
#endif
#ifdef _WINDOWS
#include <Windows.h>
typedef HANDLE FileHandle;
#else
#include <unistd.h>
typedef int FileHandle;
#endif
#include "logger.h"
#include "cached_io.h"
#include "common_includes.h"
#include "windows_customizations.h"
#ifdef EXEC_ENV_OLS
#include "content_buf.h"
#include "memory_mapped_files.h"
#endif
// taken from
// https://github.com/Microsoft/BLAS-on-flash/blob/master/include/utils.h
// round up X to the nearest multiple of Y
#define ROUND_UP(X, Y) \
((((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0)) * (Y))
#define DIV_ROUND_UP(X, Y) (((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0))
// round down X to the nearest multiple of Y
#define ROUND_DOWN(X, Y) (((uint64_t)(X) / (Y)) * (Y))
// alignment tests
#define IS_ALIGNED(X, Y) ((uint64_t)(X) % (uint64_t)(Y) == 0)
#define IS_512_ALIGNED(X) IS_ALIGNED(X, 512)
#define IS_4096_ALIGNED(X) IS_ALIGNED(X, 4096)
typedef uint64_t _u64;
typedef int64_t _s64;
typedef uint32_t _u32;
typedef int32_t _s32;
typedef uint16_t _u16;
typedef int16_t _s16;
typedef uint8_t _u8;
typedef int8_t _s8;
namespace diskann {
static const size_t MAX_SIZE_OF_STREAMBUF = 2LL * 1024 * 1024 * 1024;
enum Metric { L2 = 0, INNER_PRODUCT = 1, FAST_L2 = 2, PQ = 3 };
// Allocate `size` bytes with alignment `align` into *ptr.
// `size` must be a multiple of `align` (checked in debug builds only);
// allocation failure is likewise only caught by assert.
inline void alloc_aligned(void** ptr, size_t size, size_t align) {
  *ptr = nullptr;
  assert(IS_ALIGNED(size, align));
  void* block = nullptr;
#ifndef _WINDOWS
  block = ::aligned_alloc(align, size);
#else
  // Windows CRT takes (size, align) -- the opposite argument order.
  block = ::_aligned_malloc(size, align);
#endif
  assert(block != nullptr);
  *ptr = block;
}
// Release a block obtained from alloc_aligned. Calling with nullptr is a
// no-op. There is no way to verify the pointer really came from
// alloc_aligned, so callers must keep the pairing straight.
inline void aligned_free(void* ptr) {
  if (!ptr) {
    return;  // nothing to release
  }
#ifndef _WINDOWS
  free(ptr);
#else
  ::_aligned_free(ptr);
#endif
}
// Fill addr[0..size-1] with `size` DISTINCT random values in [0, N).
// Strategy: draw values in [0, N - size), sort, bump duplicates so the
// sequence becomes strictly increasing (the reduced range leaves exactly
// `size` slots of headroom, so no bump can reach N), then rotate the whole
// set by a random offset modulo N to remove the bias toward small values.
// NOTE(review): assumes 0 < size < N; size >= N would make the modulus
// `N - size` zero (UB) -- verify at call sites.
inline void GenRandom(std::mt19937& rng, unsigned* addr, unsigned size,
                      unsigned N) {
  for (unsigned i = 0; i < size; ++i) {
    addr[i] = rng() % (N - size);
  }
  std::sort(addr, addr + size);
  // De-duplicate: after sorting, force strict monotonicity.
  for (unsigned i = 1; i < size; ++i) {
    if (addr[i] <= addr[i - 1]) {
      addr[i] = addr[i - 1] + 1;
    }
  }
  // Random cyclic shift; distinctness is preserved modulo N.
  unsigned off = rng() % N;
  for (unsigned i = 0; i < size; ++i) {
    addr[i] = (addr[i] + off) % N;
  }
}
// get_bin_metadata functions START
inline void get_bin_metadata_impl(std::basic_istream<char>& reader,
size_t& nrows, size_t& ncols) {
int nrows_32, ncols_32;
reader.read((char*) &nrows_32, sizeof(int));
reader.read((char*) &ncols_32, sizeof(int));
nrows = nrows_32;
ncols = ncols_32;
}
#ifdef EXEC_ENV_OLS
// OLS variant: read (nrows, ncols) from the header of a memory-mapped .bin
// file. Wraps the mapped bytes in a ContentBuf so the shared stream-based
// parser can be reused without copying the file.
inline void get_bin_metadata(MemoryMappedFiles& files,
                             const std::string& bin_file, size_t& nrows,
                             size_t& ncols) {
  diskann::cout << "Getting metadata for file: " << bin_file << std::endl;
  auto fc = files.getContent(bin_file);
  // Stream view over the mapped content; no data is copied here.
  auto cb = ContentBuf((char*) fc._content, fc._size);
  std::basic_istream<char> reader(&cb);
  get_bin_metadata_impl(reader, nrows, ncols);
}
#endif
// Read (nrows, ncols) from the header of a .bin file on disk.
inline void get_bin_metadata(const std::string& bin_file, size_t& nrows,
                             size_t& ncols) {
  std::ifstream in(bin_file.c_str(), std::ios::binary);
  get_bin_metadata_impl(in, nrows, ncols);
}
// get_bin_metadata functions END
// Render `num` values as a bracketed, comma-terminated list followed by a
// newline, e.g. "[1,2,3,]\n". Intended for debug logging.
template<typename T>
inline std::string getValues(T* data, size_t num) {
  std::stringstream out;
  out << "[";
  size_t idx = 0;
  while (idx < num) {
    out << std::to_string(data[idx]) << ",";
    ++idx;
  }
  out << "]" << std::endl;
  return out.str();
}
// load_bin functions START
// Parse a .bin stream ([npts:int32][dim:int32][row-major T payload]) into a
// freshly new[]-allocated buffer. Validates that `actual_file_size` equals
// header + payload exactly and throws ANNException otherwise.
// Caller owns `data` (release with delete[]).
template<typename T>
inline void load_bin_impl(std::basic_istream<char>& reader,
                          size_t actual_file_size, T*& data, size_t& npts,
                          size_t& dim) {
  int npts_i32, dim_i32;
  reader.read((char*) &npts_i32, sizeof(int));
  reader.read((char*) &dim_i32, sizeof(int));
  npts = (unsigned) npts_i32;
  dim = (unsigned) dim_i32;
  diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
                << std::endl;
  // The file must be exactly header (2 x uint32) + npts*dim payload.
  size_t expected_actual_file_size =
      npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
  if (actual_file_size != expected_actual_file_size) {
    std::stringstream stream;
    stream << "Error. File size mismatch. Actual size is " << actual_file_size
           << " while expected size is " << expected_actual_file_size
           << " npts = " << npts << " dim = " << dim
           << " size of <T>= " << sizeof(T) << std::endl;
    diskann::cout << stream.str();
    throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                __LINE__);
  }
  data = new T[npts * dim];
  reader.read((char*) data, npts * dim * sizeof(T));

  //    diskann::cout << "Last bytes: "
  //                  << getValues<T>(data + (npts - 2) * dim, dim);
  //    diskann::cout << "Finished reading bin file." << std::endl;
}
#ifdef EXEC_ENV_OLS
// OLS variant: "load" a .bin file from a memory-mapped view. The header
// counts are read directly from the mapped bytes and `data` is pointed at
// the payload inside the mapping -- NO copy is made, so `data` must not be
// freed and is only valid while the mapping lives.
template<typename T>
inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file,
                     T*& data, size_t& npts, size_t& dim) {
  diskann::cout << "Reading bin file " << bin_file.c_str() << " ..."
                << std::endl;
  auto fc = files.getContent(bin_file);

  uint32_t t_npts, t_dim;
  uint32_t* contentAsIntPtr = (uint32_t*) (fc._content);
  t_npts = *(contentAsIntPtr);
  t_dim = *(contentAsIntPtr + 1);

  npts = t_npts;
  dim = t_dim;
  // Validate that the mapping is exactly header + payload.
  auto actual_file_size = npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
  if (actual_file_size != fc._size) {
    std::stringstream stream;
    stream << "Error. File size mismatch. Actual size is " << fc._size
           << " while expected size is " << actual_file_size
           << " npts = " << npts << " dim = " << dim
           << " size of <T>= " << sizeof(T) << std::endl;
    diskann::cout << stream.str();
    throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                __LINE__);
  }

  data =
      (T*) ((char*) fc._content + 2 * sizeof(uint32_t));  // No need to copy!
}
#endif
template<typename T>
inline void load_bin(const std::string& bin_file, T*& data, size_t& npts,
size_t& dim) {
// OLS
//_u64 read_blk_size = 64 * 1024 * 1024;
// cached_ifstream reader(bin_file, read_blk_size);
// size_t actual_file_size = reader.get_file_size();
// END OLS
diskann::cout << "Reading bin file " << bin_file.c_str() << " ..."
<< std::endl;
std::ifstream reader(bin_file, std::ios::binary | std::ios::ate);
uint64_t fsize = reader.tellg();
reader.seekg(0);
load_bin_impl<T>(reader, fsize, data, npts, dim);
}
// load_bin functions END
// Load a ground-truth file: [npts:int32][ngt:int32][npts*ngt ids:uint32],
// optionally followed by [npts*ngt distances:float]. Allocates `ids` (and
// `dists` when distances are present) with new[]; caller owns both.
// Throws ANNException when the file size matches neither layout.
inline void load_truthset(const std::string& bin_file, uint32_t*& ids,
                          float*& dists, size_t& npts, size_t& dim) {
  _u64 read_blk_size = 64 * 1024 * 1024;
  cached_ifstream reader(bin_file, read_blk_size);
  diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
                << std::endl;
  size_t actual_file_size = reader.get_file_size();

  int npts_i32, dim_i32;
  reader.read((char*) &npts_i32, sizeof(int));
  reader.read((char*) &dim_i32, sizeof(int));
  npts = (unsigned) npts_i32;
  dim = (unsigned) dim_i32;
  diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
                << std::endl;

  // Deduce the layout from the total byte count:
  //   1 -> ids + distances, 2 -> ids only, -1 -> unrecognized (error).
  int truthset_type = -1;
  size_t expected_file_size_with_dists =
      2 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
  if (actual_file_size == expected_file_size_with_dists)
    truthset_type = 1;

  size_t expected_file_size_just_ids =
      npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
  if (actual_file_size == expected_file_size_just_ids)
    truthset_type = 2;

  if (truthset_type == -1) {
    std::stringstream stream;
    stream << "Error. File size mismatch. File should have bin format, with "
              "npts followed by ngt followed by npts*ngt ids and optionally "
              "followed by npts*ngt distance values; actual size: "
           << actual_file_size
           << ", expected: " << expected_file_size_with_dists << " or "
           << expected_file_size_just_ids;
    diskann::cout << stream.str();
    throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                __LINE__);
  }

  ids = new uint32_t[npts * dim];
  reader.read((char*) ids, npts * dim * sizeof(uint32_t));

  if (truthset_type == 1) {
    dists = new float[npts * dim];
    reader.read((char*) dists, npts * dim * sizeof(float));
  }
}
#ifdef EXEC_ENV_OLS
// OLS smart-pointer convenience wrapper around the raw-pointer load_bin.
// NOTE(review): the mapped variant points into the memory-mapped file
// without allocating, so handing that pointer to unique_ptr<T[]> (which
// will delete[] it) looks unsafe -- verify intended usage.
template<typename T>
inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file,
                     std::unique_ptr<T[]>& data, size_t& npts, size_t& dim) {
  T* ptr;
  load_bin<T>(files, bin_file, ptr, npts, dim);
  data.reset(ptr);
}
#endif
// Smart-pointer convenience wrapper: loads the .bin file via the
// raw-pointer overload and transfers ownership into `data`.
template<typename T>
inline void load_bin(const std::string& bin_file, std::unique_ptr<T[]>& data,
                     size_t& npts, size_t& dim) {
  T* raw = nullptr;
  load_bin<T>(bin_file, raw, npts, dim);
  data.reset(raw);
}
// Write an npts x ndims matrix to `filename` in .bin format:
// [npts:int32][ndims:int32][row-major T payload].
// NOTE(review): write errors are not checked -- confirm callers tolerate
// silent failure (e.g. disk full).
template<typename T>
inline void save_bin(const std::string& filename, T* data, size_t npts,
                     size_t ndims) {
  std::ofstream writer(filename, std::ios::binary | std::ios::out);
  diskann::cout << "Writing bin: " << filename.c_str() << std::endl;
  // Header counts are narrowed to 32 bits to match the reader's format.
  int npts_i32 = (int) npts, ndims_i32 = (int) ndims;
  writer.write((char*) &npts_i32, sizeof(int));
  writer.write((char*) &ndims_i32, sizeof(int));
  diskann::cout << "bin: #pts = " << npts << ", #dims = " << ndims
                << ", size = " << npts * ndims * sizeof(T) + 2 * sizeof(int)
                << "B" << std::endl;

  //    data = new T[npts_u64 * ndims_u64];
  writer.write((char*) data, npts * ndims * sizeof(T));
  writer.close();
  diskann::cout << "Finished writing bin." << std::endl;
}
// load_aligned_bin functions START
// Parse a .bin stream into aligned memory, padding each point from `dim` up
// to `rounded_dim` (next multiple of 16) values with zeros so SIMD kernels
// can read full registers. The buffer is allocated with alloc_aligned at
// 8*sizeof(T) alignment; caller releases it with aligned_free.
// Throws ANNException when the stream size disagrees with the header.
template<typename T>
inline void load_aligned_bin_impl(std::basic_istream<char>& reader,
                                  size_t actual_file_size, T*& data,
                                  size_t& npts, size_t& dim,
                                  size_t& rounded_dim) {
  int npts_i32, dim_i32;
  reader.read((char*) &npts_i32, sizeof(int));
  reader.read((char*) &dim_i32, sizeof(int));
  npts = (unsigned) npts_i32;
  dim = (unsigned) dim_i32;

  // The file must be exactly header (2 x uint32) + npts*dim payload.
  size_t expected_actual_file_size =
      npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
  if (actual_file_size != expected_actual_file_size) {
    std::stringstream stream;
    stream << "Error. File size mismatch. Actual size is " << actual_file_size
           << " while expected size is " << expected_actual_file_size
           << " npts = " << npts << " dim = " << dim
           << " size of <T>= " << sizeof(T) << std::endl;
    diskann::cout << stream.str() << std::endl;
    throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                __LINE__);
  }
  rounded_dim = ROUND_UP(dim, 16);
  diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim
                << ", aligned_dim = " << rounded_dim << "..." << std::flush;
  size_t allocSize = npts * rounded_dim * sizeof(T);
  diskann::cout << "allocating aligned memory, " << allocSize << " bytes..."
                << std::flush;
  alloc_aligned(((void**) &data), allocSize, 8 * sizeof(T));
  diskann::cout << "done. Copying data..." << std::flush;

  // Copy each point, zero-filling the padding tail of every row.
  for (size_t i = 0; i < npts; i++) {
    reader.read((char*) (data + i * rounded_dim), dim * sizeof(T));
    memset(data + i * rounded_dim + dim, 0, (rounded_dim - dim) * sizeof(T));
  }
  diskann::cout << " done." << std::endl;
}
#ifdef EXEC_ENV_OLS
// OLS variant: load a .bin file into aligned, zero-padded memory from a
// memory-mapped view. Unlike the mapped load_bin, this DOES copy (via
// load_aligned_bin_impl), so `data` is owned by the caller.
template<typename T>
inline void load_aligned_bin(MemoryMappedFiles& files,
                             const std::string& bin_file, T*& data,
                             size_t& npts, size_t& dim, size_t& rounded_dim) {
  diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush;
  FileContent fc = files.getContent(bin_file);
  ContentBuf buf((char*) fc._content, fc._size);
  std::basic_istream<char> reader(&buf);

  size_t actual_file_size = fc._size;
  load_aligned_bin_impl(reader, actual_file_size, data, npts, dim,
                        rounded_dim);
}
#endif
template<typename T>
inline void load_aligned_bin(const std::string& bin_file, T*& data,
size_t& npts, size_t& dim, size_t& rounded_dim) {
diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush;
// START OLS
//_u64 read_blk_size = 64 * 1024 * 1024;
// cached_ifstream reader(bin_file, read_blk_size);
// size_t actual_file_size = reader.get_file_size();
// END OLS
std::ifstream reader(bin_file, std::ios::binary | std::ios::ate);
uint64_t fsize = reader.tellg();
reader.seekg(0);
load_aligned_bin_impl(reader, fsize, data, npts, dim, rounded_dim);
}
// Element-wise cast of an npts x dim row-major matrix from InType to
// OutType. Rows are processed in parallel when OpenMP is enabled.
template<typename InType, typename OutType>
void convert_types(const InType* srcmat, OutType* destmat, size_t npts,
                   size_t dim) {
#pragma omp parallel for schedule(static, 65536)
  for (int64_t row = 0; row < (int64_t) npts; row++) {
    const InType* src_row = srcmat + row * dim;
    OutType*      dst_row = destmat + row * dim;
    for (uint64_t col = 0; col < dim; col++) {
      dst_row[col] = (OutType) src_row[col];
    }
  }
}
// plain saves data as npts X ndims array into filename
// Write npts points of `ndims` values each in .Tvecs format: every point is
// preceded by its dimension as a uint32 ([dim][point][dim][point]...).
// Uses a 64 MB write-behind cache to batch small writes.
template<typename T>
void save_Tvecs(const char* filename, T* data, size_t npts, size_t ndims) {
  std::string fname(filename);

  // create cached ofstream with 64MB cache
  cached_ofstream writer(fname, 64 * 1048576);

  unsigned dims_u32 = (unsigned) ndims;

  // start writing
  for (uint64_t i = 0; i < npts; i++) {
    // write dims in u32
    writer.write((char*) &dims_u32, sizeof(unsigned));

    // get cur point in data
    T* cur_pt = data + i * ndims;
    writer.write((char*) cur_pt, ndims * sizeof(T));
  }
}
// Prefetch the vector's bytes into L1 one 64-byte cache line at a time.
// Only whole lines are issued, so coverage is complete when vecsize is a
// multiple of 64 (hence the efficiency note in the original).
inline void prefetch_vector(const char* vec, size_t vecsize) {
  const size_t whole_lines = (vecsize / 64) * 64;
  for (size_t off = 0; off < whole_lines; off += 64)
    _mm_prefetch(vec + off, _MM_HINT_T0);
}
// Same as prefetch_vector, but targets the L2 cache (_MM_HINT_T1).
// Only whole 64-byte lines are issued.
inline void prefetch_vector_l2(const char* vec, size_t vecsize) {
  const size_t whole_lines = (vecsize / 64) * 64;
  for (size_t off = 0; off < whole_lines; off += 64)
    _mm_prefetch(vec + off, _MM_HINT_T1);
}
}; // namespace diskann
// Pair of (pivot index, distance to pivot) used when ranking pivots.
// NOTE(review): both comparison operators are written "backwards"
// (they compare p.piv_dist against this->piv_dist), which inverts the
// natural ordering -- presumably so ordered containers / heaps surface
// larger distances first. Verify against call sites before changing.
struct PivotContainer {
  PivotContainer() = default;

  PivotContainer(size_t pivo_id, float pivo_dist)
      : piv_id{pivo_id}, piv_dist{pivo_dist} {
  }

  // Inverted: *this < p  <=>  p.piv_dist < this->piv_dist.
  bool operator<(const PivotContainer& p) const {
    return p.piv_dist < piv_dist;
  }

  // Inverted: *this > p  <=>  p.piv_dist > this->piv_dist.
  bool operator>(const PivotContainer& p) const {
    return p.piv_dist > piv_dist;
  }

  size_t piv_id;    // index of the pivot point
  float piv_dist;   // distance to that pivot
};
// True iff `name` can be stat()ed (i.e. exists and is reachable).
// Logs the stat return code for diagnostics.
inline bool file_exists(const std::string& name) {
  struct stat st;
  int rc = stat(name.c_str(), &st);
  diskann::cout << " Stat(" << name.c_str() << ") returned: " << rc
                << std::endl;
  return rc == 0;
}
// Size of `fname` in bytes, or 0 (with a log line) when it cannot be
// opened. Opens at end-of-file so tellg() reports the size directly.
inline _u64 get_file_size(const std::string& fname) {
  std::ifstream in(fname, std::ios::binary | std::ios::ate);
  if (in.fail() || !in.is_open()) {
    diskann::cout << "Could not open file: " << fname << std::endl;
    return 0;
  }
  _u64 end_pos = in.tellg();
  diskann::cout << " Tellg: " << in.tellg() << " as u64: " << end_pos
                << std::endl;
  in.close();
  return end_pos;
}
// Compare the actual on-disk size of `name` with the uint64 size recorded
// in its first 8 bytes; true when they match.
// Fixes: previously a missing/unreadable file left `expected_file_size`
// uninitialized (UB when compared) while tellg() of the failed stream
// yielded (size_t)-1; now open and read failures are reported explicitly
// and return false.
inline bool validate_file_size(const std::string& name) {
  std::ifstream in(std::string(name), std::ios::binary);
  if (!in.is_open()) {
    diskann::cout << "Could not open file: " << name << std::endl;
    return false;
  }
  in.seekg(0, in.end);
  size_t actual_file_size = in.tellg();
  in.seekg(0, in.beg);
  size_t expected_file_size = 0;
  in.read((char*) &expected_file_size, sizeof(uint64_t));
  if (in.fail()) {
    diskann::cout << "Could not read size metadata from file: " << name
                  << std::endl;
    in.close();
    return false;
  }
  if (actual_file_size != expected_file_size) {
    diskann::cout << "Error loading" << name << ". Expected "
                     "size (metadata): "
                  << expected_file_size
                  << ", actual file size : " << actual_file_size
                  << ". Exitting." << std::endl;
    in.close();
    return false;
  }
  in.close();
  return true;
}
#ifdef _WINDOWS
#include <intrin.h>
#include <Psapi.h>
// Windows implementation: log `message` followed by the current process's
// peak working set, working set, and private (pagefile-backed) bytes, each
// converted to GB. Uses the Win32 Psapi GetProcessMemoryInfo API.
inline void printProcessMemory(const char* message) {
  PROCESS_MEMORY_COUNTERS counters;
  HANDLE h = GetCurrentProcess();
  GetProcessMemoryInfo(h, &counters, sizeof(counters));
  diskann::cout << message << " [Peaking Working Set size: "
                << counters.PeakWorkingSetSize * 1.0 / (1024 * 1024 * 1024)
                << "GB Working set size: "
                << counters.WorkingSetSize * 1.0 / (1024 * 1024 * 1024)
                << "GB Private bytes "
                << counters.PagefileUsage * 1.0 / (1024 * 1024 * 1024) << "GB]"
                << std::endl;
}
#else
// Non-Windows fallback: memory introspection is not implemented here, so
// only the message itself is logged.
inline void printProcessMemory(const char* message) {
  diskann::cout << message << std::endl;
}
#endif
extern bool AvxSupportedCPU;
extern bool Avx2SupportedCPU;
extern bool Avx512SupportedCPU;
|
volumeramdistancetransform.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2016-2020 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#ifndef IVW_VOLUMERAMDISTANCETRANSFORM_H
#define IVW_VOLUMERAMDISTANCETRANSFORM_H
#include <modules/base/basemoduledefine.h>
#include <inviwo/core/common/inviwo.h>
#include <inviwo/core/util/indexmapper.h>
#include <inviwo/core/datastructures/volume/volume.h>
#include <inviwo/core/datastructures/volume/volumeramprecision.h>
#ifdef IVW_USE_OPENMP
#include <omp.h>
#endif
namespace inviwo {
namespace util {
/**
* Implementation of Euclidean Distance Transform according to Saito's algorithm:
* T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations
* of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11).
* pp. 1551-1565, 1994.
* http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf
*
* Calculates the distance in grid index space
* * Predicate is a function of type (const T &value) -> bool to deside if a value in the input
* is a "feature".
* * ValueTransform is a function of type (const U& squaredDist) -> U that is appiled to all
* squared distance values at the end of the calculation.
* * ProcessCallback is a function of type (double progress) -> void that is called with a value
* from 0 to 1 to indicate the progress of the calculation.
*/
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume,
VolumeRAMPrecision<U> *outDistanceField, const Matrix<3, U> basis,
const size3_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename T, typename U>
void volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume,
VolumeRAMPrecision<U> *outDistanceField, const Matrix<3, U> basis,
const size3_t upsample);
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
const size3_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename U, typename ProgressCallback>
void volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
const size3_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale, ProgressCallback callback);
template <typename U>
void volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
const size3_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale);
} // namespace util
// Three-pass squared Euclidean distance transform (Saito's algorithm, see
// the header comment above). Pass 1 scans along x, passes 2 and 3 fold in
// the y and z directions; finally valueTransform is applied to every
// squared distance. Distances are computed in physical units derived from
// the (assumed orthogonal) basis. `upsample` scales srcDim to dstDim and
// must satisfy srcDim * upsample == dstDim (checked below).
template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume,
                                      VolumeRAMPrecision<U> *outDistanceField,
                                      const Matrix<3, U> basis, const size3_t upsample,
                                      Predicate predicate, ValueTransform valueTransform,
                                      ProgressCallback callback) {

#ifdef IVW_USE_OPENMP
    // Use all hardware threads for the parallel passes below.
    omp_set_num_threads(std::thread::hardware_concurrency());
#endif

    using int64 = glm::int64;

    auto square = [](auto a) { return a * a; };

    callback(0.0);

    const T *src = inVolume->getDataTyped();
    U *dst = outDistanceField->getDataTyped();

    const i64vec3 srcDim{inVolume->getDimensions()};
    const i64vec3 dstDim{outDistanceField->getDimensions()};
    const i64vec3 sm{upsample};

    // Per-axis squared voxel extents, taken from the diagonal of B^T B.
    const auto squareBasis = glm::transpose(basis) * basis;
    const Vector<3, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1], squareBasis[2][2]};
    const Vector<3, U> squareVoxelSize{squareBasisDiag / Vector<3, U>{dstDim * dstDim}};
    const Vector<3, U> invSquareVoxelSize{Vector<3, U>{1.0f} / squareVoxelSize};

    {
        // Warn when the basis has significant off-diagonal terms: the
        // per-axis decomposition below is only valid for orthogonal bases.
        const auto maxdist = glm::compMax(squareBasisDiag);
        bool orthogonal = true;
        for (size_t i = 0; i < squareBasis.length(); i++) {
            for (size_t j = 0; j < squareBasis.length(); j++) {
                if (i != j) {
                    if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
                        orthogonal = false;
                        break;
                    }
                }
            }
        }
        if (!orthogonal) {
            LogWarnCustom(
                "volumeRAMDistanceTransform",
                "Calculating the distance transform on a non-orthogonal volume will not give "
                "correct values");
        }
    }

    if (srcDim * sm != dstDim) {
        throw Exception(
            "DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) +
                " dst = " + toString(dstDim) + " scaling = " + toString(sm),
            IVW_CONTEXT_CUSTOM("volumeRAMDistanceTransform"));
    }

    util::IndexMapper<3, int64> srcInd(srcDim);
    util::IndexMapper<3, int64> dstInd(dstDim);

    // Sample the (smaller) source volume at destination resolution.
    auto is_feature = [&](const int64 x, const int64 y, const int64 z) {
        return predicate(src[srcInd(x / sm.x, y / sm.y, z / sm.z)]);
    };

    // first pass, forward and backward scan along x
    // result: min distance in x direction
#pragma omp parallel for
    for (int64 z = 0; z < dstDim.z; ++z) {
        for (int64 y = 0; y < dstDim.y; ++y) {
            // forward
            U dist = static_cast<U>(dstDim.x);
            for (int64 x = 0; x < dstDim.x; ++x) {
                if (!is_feature(x, y, z)) {
                    ++dist;
                } else {
                    dist = U(0);
                }
                dst[dstInd(x, y, z)] = squareVoxelSize.x * square(dist);
            }
            // backward
            dist = static_cast<U>(dstDim.x);
            for (int64 x = dstDim.x - 1; x >= 0; --x) {
                if (!is_feature(x, y, z)) {
                    ++dist;
                } else {
                    dist = U(0);
                }
                dst[dstInd(x, y, z)] =
                    std::min<U>(dst[dstInd(x, y, z)], squareVoxelSize.x * square(dist));
            }
        }
    }

    // second pass, scan y direction
    // for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
    // result: min distance in x and y direction
    callback(0.3);

#pragma omp parallel
    {
        // Per-thread column buffer so reads are not disturbed by writes.
        std::vector<U> buff;
        buff.resize(dstDim.y);

#pragma omp for
        for (int64 z = 0; z < dstDim.z; ++z) {
            for (int64 x = 0; x < dstDim.x; ++x) {
                // cache column data into temporary buffer
                for (int64 y = 0; y < dstDim.y; ++y) {
                    buff[y] = dst[dstInd(x, y, z)];
                }
                for (int64 y = 0; y < dstDim.y; ++y) {
                    auto d = buff[y];
                    if (d != U(0)) {
                        // Only candidates within sqrt(d) voxels can improve d.
                        const auto rMax =
                            static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
                        // NOTE(review): rStart = min(rMax, y - 1) never lets the
                        // window reach index 0 for y >= 1 -- verify against
                        // Saito's algorithm whether this should be min(rMax, y).
                        const auto rStart = std::min(rMax, y - 1);
                        const auto rEnd = std::min(rMax, dstDim.y - y);
                        for (int64 n = -rStart; n < rEnd; ++n) {
                            const auto w = buff[y + n] + squareVoxelSize.y * square(n);
                            if (w < d) d = w;
                        }
                    }
                    dst[dstInd(x, y, z)] = d;
                }
            }
        }
    }

    // third pass, scan z direction
    // for each voxel v(x,y,z) find min_i(data(x,y,i) + (z - i)^2), 0 <= i < dimZ
    // result: min distance in x and y direction
    callback(0.6);

#pragma omp parallel
    {
        std::vector<U> buff;
        buff.resize(dstDim.z);

#pragma omp for
        for (int64 y = 0; y < dstDim.y; ++y) {
            for (int64 x = 0; x < dstDim.x; ++x) {
                // cache column data into temporary buffer
                for (int64 z = 0; z < dstDim.z; ++z) {
                    buff[z] = dst[dstInd(x, y, z)];
                }
                for (int64 z = 0; z < dstDim.z; ++z) {
                    auto d = buff[z];
                    if (d != U(0)) {
                        const auto rMax =
                            static_cast<int64>(std::sqrt(d * invSquareVoxelSize.z)) + 1;
                        // NOTE(review): same z - 1 bound as the y pass; verify.
                        const auto rStart = std::min(rMax, z - 1);
                        const auto rEnd = std::min(rMax, dstDim.z - z);
                        for (int64 n = -rStart; n < rEnd; ++n) {
                            const auto w = buff[z + n] + squareVoxelSize.z * square(n);
                            if (w < d) d = w;
                        }
                    }
                    dst[dstInd(x, y, z)] = d;
                }
            }
        }
    }

    // scale data: apply the user transform to every squared distance.
    callback(0.9);
    const int64 volSize = dstDim.x * dstDim.y * dstDim.z;
#pragma omp parallel for
    for (int64 i = 0; i < volSize; ++i) {
        dst[i] = valueTransform(dst[i]);
    }

    callback(1.0);
}
// Convenience overload with default policies: a voxel is a "feature" when
// its normalized value exceeds 0.5; the output is the plain (square-rooted)
// Euclidean distance; progress reports are discarded.
template <typename T, typename U>
void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume,
                                      VolumeRAMPrecision<U> *outDistanceField,
                                      const Matrix<3, U> basis, const size3_t upsample) {

    util::volumeRAMDistanceTransform(
        inVolume, outDistanceField, basis, upsample,
        [](const T &val) { return util::glm_convert_normalized<double>(val) > 0.5; },
        [](const U &squareDist) {
            return static_cast<U>(std::sqrt(static_cast<double>(squareDist)));
        },
        [](double f) {});  // no-op progress callback
}
// Volume-level entry point: fetches the RAM representation, dispatches on
// the scalar sample type, and forwards to volumeRAMDistanceTransform with
// the caller-supplied predicate / transform / progress callback.
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
                                   const size3_t upsample, Predicate predicate,
                                   ValueTransform valueTransform, ProgressCallback callback) {

    const auto inputVolumeRep = inVolume->getRepresentation<VolumeRAM>();
    inputVolumeRep->dispatch<void, dispatching::filter::Scalars>([&](const auto vrprecision) {
        volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample,
                                   predicate, valueTransform, callback);
    });
}
// Flag-driven entry point. The flags select one of eight concrete
// (predicate, transform) pairs:
//  * normalize -- compare the threshold against normalized [0,1] values
//    instead of raw sample values;
//  * flip      -- features are values BELOW the threshold rather than above;
//  * square    -- keep squared distances (identity) instead of sqrt;
//  * scale     -- multiplied into every output value.
// Each lambda has a distinct closure type, so the combination is resolved
// with an explicit 8-way branch rather than variables (which would require
// type erasure, e.g. std::function).
template <typename U, typename ProgressCallback>
void util::volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
                                   const size3_t upsample, double threshold, bool normalize,
                                   bool flip, bool square, double scale,
                                   ProgressCallback progress) {

    const auto inputVolumeRep = inVolume->getRepresentation<VolumeRAM>();
    inputVolumeRep->dispatch<void, dispatching::filter::Scalars>([&](const auto vrprecision) {
        using ValueType = util::PrecisionValueType<decltype(vrprecision)>;

        // Raw-value predicates.
        const auto predicateIn = [threshold](const ValueType &val) { return val < threshold; };
        const auto predicateOut = [threshold](const ValueType &val) { return val > threshold; };

        // Normalized-value predicates.
        const auto normPredicateIn = [threshold](const ValueType &val) {
            return util::glm_convert_normalized<double>(val) < threshold;
        };
        const auto normPredicateOut = [threshold](const ValueType &val) {
            return util::glm_convert_normalized<double>(val) > threshold;
        };

        // Output transforms: squared distance vs. Euclidean distance.
        const auto valTransIdent = [scale](const float &squareDist) {
            return static_cast<float>(scale * squareDist);
        };
        const auto valTransSqrt = [scale](const float &squareDist) {
            return static_cast<float>(scale * std::sqrt(squareDist));
        };

        if (normalize && square && flip) {
            util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
                                             upsample, normPredicateIn, valTransIdent, progress);
        } else if (normalize && square && !flip) {
            util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
                                             upsample, normPredicateOut, valTransIdent, progress);
        } else if (normalize && !square && flip) {
            util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
                                             upsample, normPredicateIn, valTransSqrt, progress);
        } else if (normalize && !square && !flip) {
            util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
                                             upsample, normPredicateOut, valTransSqrt, progress);
        } else if (!normalize && square && flip) {
            util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
                                             upsample, predicateIn, valTransIdent, progress);
        } else if (!normalize && square && !flip) {
            util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
                                             upsample, predicateOut, valTransIdent, progress);
        } else if (!normalize && !square && flip) {
            util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
                                             upsample, predicateIn, valTransSqrt, progress);
        } else if (!normalize && !square && !flip) {
            util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(),
                                             upsample, predicateOut, valTransSqrt, progress);
        }
    });
}
// Flag-driven entry point without progress reporting (forwards a no-op
// progress callback to the overload above).
template <typename U>
void util::volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField,
                                   const size3_t upsample, double threshold, bool normalize,
                                   bool flip, bool square, double scale) {
    util::volumeDistanceTransform(inVolume, outDistanceField, upsample, threshold, normalize, flip,
                                  square, scale, [](double) {});
}
} // namespace inviwo
#endif // IVW_VOLUMERAMDISTANCETRANSFORM_H
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/* Read every line of `filename` into a fresh list (one heap-allocated
 * string per line). Exits via file_error() when the file cannot be opened.
 * Caller owns the returned list and its strings. */
list *get_paths(char *filename)
{
    FILE *fp = fopen(filename, "r");
    if (!fp) file_error(filename);

    list *lines = make_list();
    char *line;
    while ((line = fgetl(fp)) != 0) {
        list_insert(lines, line);
    }
    fclose(fp);
    return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
/* Sample n paths uniformly (with replacement) from paths[0..m-1].
 * The global mutex serializes rand() across loader threads.
 * Caller frees the returned array; the strings themselves are shared. */
char **get_random_paths(char **paths, int n, int m)
{
    char **picked = calloc(n, sizeof(char *));
    pthread_mutex_lock(&mutex);
    for (int i = 0; i < n; ++i) {
        picked[i] = paths[rand() % m];
    }
    pthread_mutex_unlock(&mutex);
    return picked;
}
/* Apply find_replace() to each of the n paths.
 * Returns a new array of newly allocated strings; caller frees both. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **out = calloc(n, sizeof(char *));
    for (int i = 0; i < n; ++i) {
        char buf[4096];
        find_replace(paths[i], find, replace, buf);
        out[i] = copy_string(buf);
    }
    return out;
}
/* Load n images resized to w x h, convert each to grayscale, and pack the
 * pixel buffers into a matrix with one row per image. Each row takes
 * ownership of its image's data; X.cols reflects the last loaded image. */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        /* load as 3-channel color, then collapse to grayscale */
        image im = load_image(paths[i], w, h, 3);

        image gray = grayscale_image(im);
        free_image(im);
        im = gray;

        X.vals[i] = im.data;   /* row adopts the pixel buffer */
        X.cols = im.h*im.w*im.c;
    }
    return X;
}
/* Load n color images resized to w x h into a matrix, one image per row.
 * Each row adopts the image's pixel buffer; X.cols is set from the last
 * image loaded. */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float *));
    X.cols = 0;

    for (int idx = 0; idx < n; ++idx) {
        image img = load_image_color(paths[idx], w, h);
        X.vals[idx] = img.data;
        X.cols = img.h * img.w * img.c;
    }
    return X;
}
/* Load n images and apply training-time augmentation: either a centered
 * crop (center != 0) or a random scale/aspect/rotation crop, followed by a
 * random horizontal flip and random HSV distortion. Each matrix row adopts
 * the augmented image's pixel buffer; X.cols reflects the last image. */
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        /* load at native resolution; the crop determines the final size */
        image im = load_image_color(paths[i], 0, 0);
        image crop;
        if(center){
            crop = center_crop_image(im, size, size);
        } else {
            crop = random_augment_image(im, angle, aspect, min, max, size, size);
        }
        int flip = rand()%2;
        if (flip) flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);

        /*
        show_image(im, "orig");
        show_image(crop, "crop");
        cvWaitKey(0);
        */
        free_image(im);
        X.vals[i] = crop.data;   /* row adopts the augmented pixels */
        X.cols = crop.h*crop.w*crop.c;
    }
    return X;
}
box_label *read_boxes(char *filename, int *n)
{
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
float x, y, h, w;
int id;
int count = 0;
int size = 64;
box_label *boxes = calloc(size, sizeof(box_label));
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
if(count == size) {
size = size * 2;
boxes = realloc(boxes, size*sizeof(box_label));
}
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
/* Shuffle the box array in place: each slot is swapped with a uniformly
 * chosen slot (matches the original's rand()%n scheme). */
void randomize_boxes(box_label *b, int n)
{
    int pos;
    for(pos = 0; pos < n; ++pos){
        int other = rand()%n;
        box_label tmp = b[pos];
        b[pos] = b[other];
        b[other] = tmp;
    }
}
/* Remap box coordinates from original-image space into the cropped/flipped
 * training view: scale by (sx,sy), shift by (dx,dy), mirror horizontally when
 * flip is set, then clamp to [0,1] and rebuild center/size from the clamped
 * edges. Boxes at exactly (0,0) are treated as padding and pushed far out of
 * range so downstream size filters drop them. */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            /* sentinel for an empty slot: move it far outside the image */
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;

        if(flip){
            /* mirror around x = 0.5: left/right swap and reflect */
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }

        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);

        /* rebuild center/size from the clamped edges */
        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);

        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}
/* Build SWAG-format truth for one image: derive the label path from the
 * image path, read and geometry-correct the boxes, then write up to 30
 * entries of (x,y,w,h + one-hot class) into `truth`. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    /* image path -> label path: swap directory and extension */
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count && i < 30; ++i) {  /* hard cap of 30 boxes per image */
        x =  boxes[i].x;
        y =  boxes[i].y;
        w =  boxes[i].w;
        h =  boxes[i].h;
        id = boxes[i].id;

        /* NOTE(review): `.0` compares against zero, so only negative sizes
         * are skipped here — other fill_truth_* variants use small positive
         * thresholds; confirm this is intentional */
        if (w < .0 || h < .0) continue;

        int index = (4+classes) * i;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;

        if (id < classes) truth[index+id] = 1;  /* one-hot class */
    }
    free(boxes);
}
/* Build region-layer (YOLOv1-style grid) truth for one image: each box is
 * assigned to its num_boxes x num_boxes grid cell; a cell stores
 * [objectness, one-hot classes, x-offset, y-offset, w, h]. First box to
 * claim a cell wins. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    /* image path -> label path: swap directory and extension */
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);

    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count; ++i) {
        x =  boxes[i].x;
        y =  boxes[i].y;
        w =  boxes[i].w;
        h =  boxes[i].h;
        id = boxes[i].id;

        if (w < .005 || h < .005) continue;  /* drop degenerate boxes */

        /* grid cell containing the box center, and the center's
         * offset within that cell */
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);

        x = x*num_boxes - col;
        y = y*num_boxes - row;

        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;   /* cell already claimed by another box */
        truth[index++] = 1;           /* objectness */

        if (id < classes) truth[index+id] = 1;  /* one-hot class */
        index += classes;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}
/* Expand a run-length encoding into im.data: runs alternate between 0 and 1
 * starting with 0, and any pixels beyond the encoded runs are filled with the
 * value following the final run. */
void load_rle(image im, int *rle, int n)
{
    int pix = 0;
    int value = 0;
    int run;
    for(run = 0; run < n; ++run){
        int k;
        for(k = 0; k < rle[run]; ++k){
            im.data[pix++] = value;
        }
        value = 1 - value;
    }
    int total = im.h*im.w*im.c;
    while(pix < total){
        im.data[pix++] = value;
    }
}
/* OR the single-channel mask `src` into channel `c` of `dest`: any nonzero
 * source pixel sets the corresponding destination pixel to 1. */
void or_image(image src, image dest, int c)
{
    int area = src.w*src.h;
    int px;
    for(px = 0; px < area; ++px){
        if(src.data[px]) dest.data[dest.w*dest.h*c + px] = 1;
    }
}
/* Make channels mutually exclusive: wherever channel k has a nonzero pixel,
 * clear that pixel in every later channel (earlier channels win). */
void exclusive_image(image src)
{
    int area = src.w*src.h;
    int ch, px, later;
    for(ch = 0; ch < src.c-1; ++ch){
        for(px = 0; px < area; ++px){
            if(!src.data[ch*area + px]) continue;
            for(later = ch+1; later < src.c; ++later){
                src.data[later*area + px] = 0;
            }
        }
    }
}
/* Compute the tight bounding box (in pixels) around all nonzero pixels of
 * the first channel of `im`. Returns {x=minx, y=miny, w, h}; if no pixel is
 * set, w and h come out non-positive (minx > maxx), which callers use to
 * detect an empty mask. */
box bound_image(image im)
{
    int x,y;
    int minx = im.w;
    int miny = im.h;
    int maxx = 0;
    int maxy = 0;
    for(y = 0; y < im.h; ++y){
        for(x = 0; x < im.w; ++x){
            if(im.data[y*im.w + x]){
                minx = (x < minx) ? x : minx;
                miny = (y < miny) ? y : miny;
                maxx = (x > maxx) ? x : maxx;
                maxy = (y > maxy) ? y : maxy;
            }
        }
    }
    /* +1 because max indices are inclusive */
    box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
    //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
    return b;
}
/* Build instance-segmentation truth for one image. The mask file holds one
 * "class_id rle_counts" line per instance; each instance mask is decoded,
 * put through the same augmentation as the image (aug + optional flip),
 * bounded, cropped, resized to mw x mh, and written to `truth` as
 * [x,y,w,h (normalized), mw*mh mask values, class id]. At most num_boxes
 * instances are kept; instances that vanish after augmentation are skipped. */
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    image part = make_image(w, h, 1);  /* reused scratch mask per instance */
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        /* apply the same geometric augmentation that was applied to the image */
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);

        box b = bound_image(sized);
        if(b.w > 0){  /* instance still visible after augmentation */
            image crop = crop_image(sized, b.x, b.y, b.w, b.h);
            image mask = resize_image(crop, mw, mh);
            /* normalized center-x, center-y, width, height */
            truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
            truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
            truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
            truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
            int j;
            for(j = 0; j < mw*mh; ++j){
                truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
            }
            truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;  /* class id last */
            free_image(crop);
            free_image(mask);
            ++i;
        }
        free_image(sized);
        free(rle);
    }
    fclose(file);
    free_image(part);
}
/* Build detection (YOLO) truth for one image: read boxes from the derived
 * label path, shuffle, geometry-correct them into the augmented view, and
 * write up to num_boxes entries of [x, y, w, h, class_id] into `truth`.
 * Note: `classes` is accepted for signature parity but unused here. */
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    /* image path -> label path: swap directory and extension */
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);

    find_replace(labelpath, "raw", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);  /* shuffle so truncation keeps a random subset */
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if(count > num_boxes) count = num_boxes;
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count; ++i) {
        x =  boxes[i].x;
        y =  boxes[i].y;
        w =  boxes[i].w;
        h =  boxes[i].h;
        id = boxes[i].id;

        if ((w < .001 || h < .001)) continue;  /* drop degenerate boxes */

        truth[i*5+0] = x;
        truth[i*5+1] = y;
        truth[i*5+2] = w;
        truth[i*5+3] = h;
        truth[i*5+4] = id;
    }
    free(boxes);
}
#define NUMCHARS 37
/* Decode and print an n-character captcha prediction: each character is the
 * argmax over its NUMCHARS-wide score slice, mapped back to alphanumeric. */
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}
/* Build captcha truth from a filename: each character of the basename before
 * the '.' becomes a one-hot entry in a NUMCHARS-wide slot of `truth`; unused
 * slots get the trailing "blank" class (NUMCHARS-1). */
void fill_truth_captcha(char *path, int n, float *truth)
{
    /* take the basename; fall back to the whole path when there is no '/'
     * (the original unconditionally incremented strrchr's result, which
     * dereferences NULL for bare filenames) */
    char *begin = strrchr(path, '/');
    begin = begin ? begin + 1 : path;
    size_t len = strlen(begin);  /* hoisted out of the loop condition */
    int i;
    for(i = 0; i < len && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;  /* "no character" class */
    }
}
/* Load a captcha batch: n images (random when m > 0) plus per-character
 * one-hot labels decoded from each filename (k characters, NUMCHARS classes
 * each). */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);  /* sample n of m paths */
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);  /* frees the sampled array, not the strings */
    return d;
}
/* Load a captcha autoencoder batch: the targets are the inputs themselves
 * (d.y aliases d.X, so freeing must treat them as one). */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    /* NOTE(review): 17100 overrides the natural w*h*c column count —
     * presumably the encoder's fixed input size; confirm against the model */
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}
/* One-hot fill from path matching: truth[i] is set to 1 for every label that
 * occurs as a substring of `path`. Warns unless exactly one label matched
 * (for k == 1, zero matches are also tolerated). */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    memset(truth, 0, k*sizeof(float));
    int matched = 0;
    int j;
    for(j = 0; j < k; ++j){
        if(strstr(path, labels[j]) != 0){
            truth[j] = 1;
            ++matched;
        }
    }
    if(matched != 1 && (k != 1 || matched != 0)) printf("Too many or too few labels: %d, %s\n", matched, path);
}
/* Expand a one-hot label through a class hierarchy (hierarchical softmax):
 * first mark every ancestor of each set label, then fill every sibling group
 * that contains no positive with SECRET_NUM so its loss is masked out. */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    /* propagate positives up the tree: a child implies all its ancestors */
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;  /* running offset of the current sibling group */
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;  /* 1 = no positive label in this group */
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            /* group has no positives: mark it ignored in the loss */
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}
/* Read one scalar regression target per image from the parallel "targets"
 * directory; returns an n x 1 matrix. Missing or unreadable label files are
 * reported and leave the row at its zero-initialized value (the original
 * dereferenced a NULL FILE* when fopen failed). */
matrix load_regression_labels_paths(char **paths, int n)
{
    matrix y = make_matrix(n, 1);
    int i;
    for(i = 0; i < n; ++i){
        char labelpath[4096];
        find_replace(paths[i], "images", "targets", labelpath);
        find_replace(labelpath, "JPEGImages", "targets", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".png", ".txt", labelpath);

        FILE *file = fopen(labelpath, "r");
        if(!file){
            fprintf(stderr, "Couldn't open target file %s\n", labelpath);
            continue;  /* row stays 0 */
        }
        if(fscanf(file, "%f", &(y.vals[i][0])) != 1){
            fprintf(stderr, "No target value in %s\n", labelpath);
        }
        fclose(file);
    }
    return y;
}
/* Build an n x k one-hot label matrix by substring-matching each path
 * against the label names; optionally expand each row through the class
 * hierarchy. A NULL labels array yields an all-zero matrix. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix truth = make_matrix(n, k);
    int row;
    for(row = 0; row < n && labels; ++row){
        fill_truth(paths[row], labels, k, truth.vals[row]);
        if(hierarchy) fill_hierarchy(truth.vals[row], k, hierarchy);
    }
    return truth;
}
/* Build an n x k multi-hot tag matrix: for each image, read integer tag ids
 * from the derived label file ("imgs" -> "labels", falling back to
 * "labels2") and set the corresponding columns. Images with no label file
 * are silently left all-zero. Prints how many files were found. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    int count = 0;  /* number of label files successfully opened */
    for(i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file){
            /* fallback location */
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
            if(!file) continue;  /* no tags for this image */
        }
        ++count;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    printf("%d/%d\n", count, n);
    return y;
}
/* Read label names, one per line, from `filename`. Returns a heap array of
 * strings; the list shell is freed but the strings live on in the array. */
char **get_labels(char *filename)
{
    list *lines = get_paths(filename);
    char **names = (char **)list_to_array(lines);
    free_list(lines);
    return names;
}
/* Release a dataset. Shallow datasets only borrow their rows, so just the
 * row-pointer arrays are freed; deep datasets free the matrices entirely. */
void free_data(data d)
{
    if(d.shallow){
        free(d.X.vals);
        free(d.y.vals);
    } else {
        free_matrix(d.X);
        free_matrix(d.y);
    }
}
/* Build a w x h x classes segmentation mask for one image: each line of the
 * derived mask file is "class_id rle_counts"; every instance's RLE mask is
 * OR-ed into its class channel. Overlapping classes are allowed (channels
 * are not made exclusive — see the disabled call below). */
image get_segmentation_image(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);  /* reused scratch mask per instance */
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);  /* accumulate into class channel `id` */
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}
/* Like get_segmentation_image, but with an extra trailing "background"
 * channel: it starts all-ones and is cleared wherever any instance mask
 * covers a pixel, so channel `classes` marks unlabeled pixels. */
image get_segmentation_image2(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes+1);
    int i;
    /* background channel starts fully set */
    for(i = 0; i < w*h; ++i){
        mask.data[w*h*classes + i] = 1;
    }
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);  /* reused scratch mask per instance */
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);
        /* labeled pixels are no longer background */
        for(i = 0; i < w*h; ++i){
            if(part.data[i]) mask.data[w*h*classes + i] = 0;
        }
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}
/* Load a semantic-segmentation batch: n random images with rotate/crop/flip
 * and HSV augmentation; each target is the class mask put through the SAME
 * geometric augmentation, downscaled by `div` in every spatial parameter. */
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y.rows = n;
    d.y.cols = h*w*classes/div/div;  /* mask is div x smaller per axis */
    d.y.vals = calloc(d.X.rows, sizeof(float*));

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
        //image mask = make_image(orig.w, orig.h, classes+1);
        /* identical geometry as the image, scaled down by div */
        image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);

        if(flip) flip_image(sized_m);
        d.y.vals[i] = sized_m.data;

        free_image(orig);
        free_image(mask);

        /*
           image rgb = mask_to_rgb(sized_m, classes);
           show_image(rgb, "part");
           show_image(sized, "orig");
           cvWaitKey(0);
           free_image(rgb);
         */
    }
    free(random_paths);
    return d;
}
/* Load an instance-segmentation batch: n random images with rotate/crop,
 * coin-flip mirror and HSV distortion; per-image truth comes from
 * fill_truth_iseg with a fixed 14x14 instance-mask resolution. */
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, (coords+1)*boxes);  /* coords + class id per box */

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");

        /* truth must see the same augmentation (a, flip) as the image */
        fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);

        free_image(orig);

        /*
           image rgb = mask_to_rgb(sized_m, classes);
           show_image(rgb, "part");
           show_image(sized, "orig");
           cvWaitKey(0);
           free_image(rgb);
         */
    }
    free(random_paths);
    return d;
}
/* Load a region-layer (grid detection) batch: each image gets a random
 * jittered crop, resize to w x h, coin-flip mirror and HSV distortion; the
 * truth grid is size x size cells of (5 + classes) values, built from the
 * labels transformed by the same crop geometry. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;


    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;

        /* random crop offsets within +/- jitter of each edge */
        int dw = (ow*jitter);
        int dh = (oh*jitter);

        int pleft  = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop   = rand_uniform(-dh, dh);
        int pbot   = rand_uniform(-dh, dh);

        int swidth =  ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        float sx = (float)swidth  / ow;
        float sy = (float)sheight / oh;

        int flip = rand()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        /* normalized crop offset, used to remap box coordinates */
        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);

        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}
/* Load a pairwise-comparison batch: each sample stacks two images (6
 * channels) and, per class, encodes which image has the higher-IOU label
 * (1/0, 0/1, or SECRET_NUM/SECRET_NUM when inconclusive). Label files list
 * "class_id iou" lines; the max IOU per class is kept. The original did not
 * check fopen and crashed on a missing label file. */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);  /* two paths per sample */
    int i,j;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;  /* two RGB images stacked */

    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2],   w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);

        d.X.vals[i] = calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i],         im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));

        int id;
        float iou;

        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2],   "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        if(!fp1) file_error(imlabel1);  /* fail loudly instead of NULL deref */

        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            /* keep the best IOU seen for this class on image 1 */
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }

        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if(!fp2) file_error(imlabel2);

        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            /* keep the best IOU seen for this class on image 2 */
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }

        /* binarize per class: clear winner -> 1/0 or 0/1, otherwise mask */
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 &&  d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j]   = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);

        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/* Load ONE random image as a SWAG sample: jittered crop, resize back to the
 * original dimensions, coin-flip mirror; truth is up to 30 boxes of
 * (4 + classes) values built by fill_truth_swag with matching geometry. */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = rand()%n;
    char *random_path = paths[index];

    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;

    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;

    d.X.rows = 1;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = (4+classes)*30;  /* 30-box cap, matching fill_truth_swag */
    d.y = make_matrix(1, k);

    /* random crop offsets within +/- jitter of each edge */
    int dw = w*jitter;
    int dh = h*jitter;

    int pleft  = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop   = rand_uniform(-dh, dh);
    int pbot   = rand_uniform(-dh, dh);

    int swidth =  w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth  / w;
    float sy = (float)sheight / h;

    int flip = rand()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    /* normalized crop offset, used to remap box coordinates */
    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);

    free_image(orig);
    free_image(cropped);

    return d;
}
/* Load a YOLO detection batch: each image is placed into a gray (0.5-filled)
 * w x h canvas at a random scale/aspect/position (letterbox-style jitter),
 * HSV-distorted and possibly mirrored; truth rows hold up to `boxes` entries
 * of [x,y,w,h,id] remapped into canvas coordinates. */
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, 5*boxes);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        image sized = make_image(w, h, orig.c);
        fill_image(sized, .5);  /* gray background for uncovered canvas */

        /* jitter the aspect ratio, then pick a random scale in [.25, 2] */
        float dw = jitter * orig.w;
        float dh = jitter * orig.h;
        float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
        float scale = rand_uniform(.25, 2);

        float nw, nh;

        if(new_ar < 1){
            nh = scale * h;
            nw = nh * new_ar;
        } else {
            nw = scale * w;
            nh = nw / new_ar;
        }

        /* random placement of the scaled image on the canvas */
        float dx = rand_uniform(0, w - nw);
        float dy = rand_uniform(0, h - nh);

        place_image(orig, nw, nh, dx, dy, sized);

        random_distort_image(sized, hue, saturation, exposure);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        d.X.vals[i] = sized.data;

        /* boxes remapped with the same placement geometry */
        fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);

        free_image(orig);
    }
    free(random_paths);
    return d;
}
/* Worker entry point: copy the load_args from the heap pointer, normalize
 * zero augmentation factors to their identity values, dispatch to the loader
 * matching a.type, and free the argument block. Results are written through
 * the pointers inside the args (a.d, a.im, a.resized). */
void *load_thread(void *ptr)
{
    //printf("Loading data: %d\n", rand());
    load_args a = *(struct load_args*)ptr;
    /* 0 means "unset": identity for multiplicative augmentations */
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;

    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == REGRESSION_DATA){
        *a.d = load_data_regression(a.paths, a.n, a.m, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == INSTANCE_DATA){
        *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == SEGMENTATION_DATA){
        *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
    } else if (a.type == REGION_DATA){
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == DETECTION_DATA){
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == SWAG_DATA){
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    } else if (a.type == COMPARE_DATA){
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
    } else if (a.type == IMAGE_DATA){
        /* single image: load at native size, then plain resize */
        *(a.im) = load_image_color(a.path, 0, 0);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    } else if (a.type == LETTERBOX_DATA){
        /* single image: load at native size, then aspect-preserving letterbox */
        *(a.im) = load_image_color(a.path, 0, 0);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    } else if (a.type == TAG_DATA){
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);  /* args block was heap-allocated by the spawner */
    return 0;
}
/* Spawn a single loader thread running load_thread on a heap copy of the
 * args (the thread frees the copy). Caller must pthread_join the handle. */
pthread_t load_data_in_thread(load_args args)
{
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    *copy = args;
    pthread_t tid;
    if(pthread_create(&tid, 0, load_thread, copy)) error("Thread creation failed");
    return tid;
}
/* Fan-out loader: split args.n items across args.threads worker threads,
 * each filling its own data buffer, then join, concatenate everything into
 * *args.d (deep) and release the per-thread buffers via a shallow free. */
void *load_threads(void *ptr)
{
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);  /* caller heap-allocated the args block */
    data *buffers = calloc(args.threads, sizeof(data));
    pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        /* partition total as evenly as possible across threads */
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    for(i = 0; i < args.threads; ++i){
        /* rows are now owned by *out; free only the buffer shells */
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}
/* Run one load synchronously on the calling thread (load_thread frees the
 * heap-allocated args copy). */
void load_data_blocking(load_args args)
{
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    *copy = args;
    load_thread(copy);
}
/* Kick off a multi-threaded asynchronous load (load_threads fans out to
 * args.threads workers); join the returned handle to wait for the batch. */
pthread_t load_data(load_args args)
{
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    *copy = args;
    pthread_t tid;
    if(pthread_create(&tid, 0, load_threads, copy)) error("Thread creation failed");
    return tid;
}
/* Load an image-to-image batch: inputs are the images at w x h, targets are
 * the grayscale companion files ("X.png" -> "X-label.png") at out_w x out_h. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);  /* frees the sampled array, not the strings */
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);  /* derived strings are owned */
    free(replace_paths);
    return d;
}
/* Plain classification loader: resized color images plus one-hot labels
 * derived by substring-matching paths against label names. */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    char **chosen = m ? get_random_paths(paths, n, m) : paths;
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(chosen, n, w, h);
    d.y = load_labels_paths(chosen, n, labels, k, 0);
    if(m) free(chosen);
    return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
/* Load a super-resolution batch: the target is a random (w*scale)x(h*scale)
 * crop of each image (with coin-flip mirror), and the input is that same
 * crop downscaled to w x h. Rows alias the crop/resize buffers. */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;

    int i;
    d.X.rows = n;
    d.X.vals = calloc(n, sizeof(float*));
    d.X.cols = w*h*3;

    d.y.rows = n;
    d.y.vals = calloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;  /* high-res target */

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = rand()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);  /* low-res input */
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }

    if(m) free(paths);
    return d;
}
/* Regression loader: augmented images paired with one scalar target per
 * image read from the parallel "targets" files. */
data load_data_regression(char **paths, int n, int m, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **chosen = m ? get_random_paths(paths, n, m) : paths;
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(chosen, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_regression_labels_paths(chosen, n);
    if(m) free(chosen);
    return d;
}
/* Build a shallow dataset whose row i is taken from dataset orig[inds[i]]
 * (same row index i in each source). Used to pick, per sample, which of
 * several augmented versions to train on. Rows are borrowed, not copied. */
data select_data(data *orig, int *inds)
{
    data d = {0};
    d.shallow = 1;
    d.w = orig[0].w;
    d.h = orig[0].h;

    d.X.rows = orig[0].X.rows;
    /* NOTE(review): y.rows is set from X.rows, not y.rows — presumably they
     * are always equal here; confirm if sources can be ragged */
    d.y.rows = orig[0].X.rows;

    d.X.cols = orig[0].X.cols;
    d.y.cols = orig[0].y.cols;

    d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
    d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
    int i;
    for(i = 0; i < d.X.rows; ++i){
        d.X.vals[i] = orig[inds[i]].X.vals[i];
        d.y.vals[i] = orig[inds[i]].y.vals[i];
    }
    return d;
}
/* Split every image of `orig` into a divs x divs grid of overlapping tiles,
 * each scaled by `size`; returns a heap array of divs*divs datasets, one per
 * grid position, each with a deep copy of the labels. Caller frees. */
data *tile_data(data orig, int divs, int size)
{
    data *ds = calloc(divs*divs, sizeof(data));
    int i, j;
    /* NOTE(review): the inner pragma opens a nested parallel region; with
     * nesting disabled (the OpenMP default) it runs serially inside each
     * outer thread — confirm nesting is intended */
#pragma omp parallel for
    for(i = 0; i < divs*divs; ++i){
        data d;
        d.shallow = 0;
        d.w = orig.w/divs * size;
        d.h = orig.h/divs * size;
        d.X.rows = orig.X.rows;
        d.X.cols = d.w*d.h*3;
        d.X.vals = calloc(d.X.rows, sizeof(float*));

        d.y = copy_matrix(orig.y);
#pragma omp parallel for
        for(j = 0; j < orig.X.rows; ++j){
            /* top-left corner of tile i, centered on its grid cell */
            int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
            int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
            image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
            d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
        }
        ds[i] = d;
    }
    return ds;
}
/* Return a deep copy of `orig` with every image bilinearly resized to
 * w x h; labels are copied unchanged. */
data resize_data(data orig, int w, int h)
{
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    int i;
    d.X.rows = orig.X.rows;
    d.X.cols = w*h*3;
    d.X.vals = calloc(d.X.rows, sizeof(float*));

    d.y = copy_matrix(orig.y);
#pragma omp parallel for
    for(i = 0; i < orig.X.rows; ++i){
        /* wrap the flat row as an image header (no copy), then resize */
        image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
        d.X.vals[i] = resize_image(im, w, h).data;
    }
    return d;
}
/* Classification loader with augmentation: crop/flip/HSV-distorted images
 * plus one-hot labels, optionally expanded through a class hierarchy. */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    char **chosen = m ? get_random_paths(paths, n, m) : paths;
    data d = {0};
    d.shallow = 0;
    d.w = size;
    d.h = size;
    d.X = load_image_augment_paths(chosen, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
    d.y = load_labels_paths(chosen, n, labels, k, hierarchy);
    if(m) free(chosen);
    return d;
}
/* Multi-label tagging loader: augmented images plus multi-hot tag vectors
 * read from the parallel label files. */
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **chosen = m ? get_random_paths(paths, n, m) : paths;
    data d = {0};
    d.w = size;
    d.h = size;
    d.shallow = 0;
    d.X = load_image_augment_paths(chosen, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(chosen, n, k);
    if(m) free(chosen);
    return d;
}
/* Stack m2's rows beneath m1's. Only the row-pointer array is new; the row
 * buffers themselves are shared with the inputs. */
matrix concat_matrix(matrix m1, matrix m2)
{
    matrix out;
    out.cols = m1.cols;
    out.rows = m1.rows + m2.rows;
    out.vals = calloc(out.rows, sizeof(float*));
    int i;
    for(i = 0; i < m1.rows; ++i){
        out.vals[i] = m1.vals[i];
    }
    for(i = 0; i < m2.rows; ++i){
        out.vals[m1.rows + i] = m2.vals[i];
    }
    return out;
}
/* Concatenate two datasets row-wise into a shallow result (rows shared with
 * the inputs); dimensions are taken from d1. */
data concat_data(data d1, data d2)
{
    data joined = {0};
    joined.shallow = 1;
    joined.X = concat_matrix(d1.X, d2.X);
    joined.y = concat_matrix(d1.y, d2.y);
    joined.w = d1.w;
    joined.h = d1.h;
    return joined;
}
/* Fold n datasets into one by repeated concatenation; each intermediate
 * shell is freed shallowly so the shared rows survive. */
data concat_datas(data *d, int n)
{
    data acc = {0};
    int i;
    for(i = 0; i < n; ++i){
        data merged = concat_data(d[i], acc);
        free_data(acc);
        acc = merged;
    }
    return acc;
}
/* Load a CSV dataset for classification: column `target` is popped from the
 * feature matrix and one-hot encoded into k classes as the label matrix. */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);  /* removes the column from X */
    float **truth = one_hot_encode(truth_1d, X.rows, k);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = truth;
    d.X = X;
    d.y = y;
    free(truth_1d);  /* raw class ids no longer needed after encoding */
    return d;
}
/* Load one CIFAR-10 binary batch file (10000 records of 1 label byte +
 * 3072 pixel bytes) into a dataset, scaling pixels to [0,1]. Exits via
 * file_error on a missing or truncated file (the original ignored fread's
 * return and would fill rows with stale stack bytes on a short file). */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;

    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        /* a short read means a corrupt/truncated batch file */
        if(fread(bytes, 1, 3073, fp) != 3073) file_error(filename);
        int class = bytes[0];
        y.vals[i][class] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    scale_data_rows(d, 1./255);  /* bytes -> [0,1] */
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}
/* Copy n uniformly sampled (with replacement) paired rows of d into the
 * caller-provided flat buffers X and y. */
void get_random_batch(data d, int n, float *X, float *y)
{
    int row;
    for(row = 0; row < n; ++row){
        int pick = rand()%d.X.rows;
        memcpy(X + row*d.X.cols, d.X.vals[pick], d.X.cols*sizeof(float));
        memcpy(y + row*d.y.cols, d.y.vals[pick], d.y.cols*sizeof(float));
    }
}
/* Copy n consecutive paired rows of d starting at `offset` into the flat
 * buffers X and y; pass y == NULL to skip labels. */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int row;
    for(row = 0; row < n; ++row){
        int src = offset + row;
        memcpy(X + row*d.X.cols, d.X.vals[src], d.X.cols*sizeof(float));
        if(y) memcpy(y + row*d.y.cols, d.y.vals[src], d.y.cols*sizeof(float));
    }
}
/* Label smoothing in place: blend each target toward the uniform
 * distribution with epsilon = 0.1. */
void smooth_data(data d)
{
    float eps = .1;
    float uniform = 1. / d.y.cols;
    int r, c;
    for(r = 0; r < d.y.rows; ++r){
        for(c = 0; c < d.y.cols; ++c){
            d.y.vals[r][c] = eps * uniform + (1-eps) * d.y.vals[r][c];
        }
    }
}
/* Load all five CIFAR-10 training batches (50000 records of 1 label byte +
 * 3072 pixel bytes) from the conventional data path, scale pixels to [0,1]
 * and apply label smoothing. Exits via file_error on a missing or truncated
 * batch file (the original ignored fread's return). */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;

    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            /* a short read means a corrupt/truncated batch file */
            if(fread(bytes, 1, 3073, fp) != 3073) file_error(buff);
            int class = bytes[0];
            y.vals[i+b*10000][class] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    scale_data_rows(d, 1./255);  /* bytes -> [0,1] */
    smooth_data(d);
    return d;
}
/* Load a Go move dataset: the file alternates "row col" lines with 361-char
 * board lines ('1' = own stone -> 1, '2' = opponent -> -1, else 0). Matrices
 * grow geometrically and are trimmed to the final count. The original
 * dereferenced a NULL board line on a truncated file and read uninitialized
 * row/col when the move line was malformed. */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row = 0, col = 0;  /* initialized: UB guard if sscanf matches nothing */
    if(!fp) file_error(filename);
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        sscanf(label, "%d %d", &row, &col);
        char *board = fgetl(fp);
        if(!board){
            /* truncated file: a move line without its board line */
            free(label);
            break;
        }

        int index = row*19 + col;
        y.vals[count][index] = 1;

        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    X = resize_matrix(X, count);  /* trim to actual size */
    y = resize_matrix(y, count);

    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;

    fclose(fp);

    return d;
}
/* In-place Fisher-Yates shuffle of the paired X/y rows of d. */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        /* rand()%(i+1) includes position i itself, making every permutation
         * reachable; the original's rand()%i never left element i in place,
         * which biases the shuffle */
        int index = rand()%(i+1);
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;

        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
/* Multiply every feature value of d.X by s, in place. */
void scale_data_rows(data d, float s)
{
    int row;
    for(row = 0; row < d.X.rows; ++row){
        scale_array(d.X.vals[row], d.X.cols, s);
    }
}
/* Add the constant s to every feature value of d.X, in place. */
void translate_data_rows(data d, float s)
{
    int row;
    for(row = 0; row < d.X.rows; ++row){
        translate_array(d.X.vals[row], d.X.cols, s);
    }
}
/* Deep copy of a dataset: the X and y matrices are duplicated; the box
 * metadata pointer is shared with the source. */
data copy_data(data d)
{
    data out = {0};
    out.w = d.w;
    out.h = d.h;
    out.shallow = 0;
    out.num_boxes = d.num_boxes;
    out.boxes = d.boxes;
    out.X = copy_matrix(d.X);
    out.y = copy_matrix(d.y);
    return out;
}
/* Normalize each feature row of d.X in place via normalize_array. */
void normalize_data_rows(data d)
{
    int row;
    for(row = 0; row < d.X.rows; ++row){
        normalize_array(d.X.vals[row], d.X.cols);
    }
}
/** Return a shallow view of the part-th of `total` roughly equal
 *  contiguous chunks of d. No rows are copied: the view's vals
 *  pointers point into d's row arrays, and shallow is set so a
 *  later free does not release the shared rows. */
data get_data_part(data d, int part, int total)
{
    int x_lo = d.X.rows * part / total;
    int x_hi = d.X.rows * (part + 1) / total;
    int y_lo = d.y.rows * part / total;
    int y_hi = d.y.rows * (part + 1) / total;
    data view = {0};
    view.shallow = 1;
    view.X.rows = x_hi - x_lo;
    view.y.rows = y_hi - y_lo;
    view.X.cols = d.X.cols;
    view.y.cols = d.y.cols;
    view.X.vals = d.X.vals + x_lo;
    view.y.vals = d.y.vals + y_lo;
    return view;
}
/** Sample num rows from d uniformly at random WITH replacement.
 *  The result is shallow: its row pointers alias d's rows, so d must
 *  outlive the sample and the sample must not be deep-freed.
 *
 *  Fix: guard against an empty source dataset — the previous version
 *  evaluated rand()%d.X.rows, which is undefined behavior (division
 *  by zero) when d.X.rows == 0. In that case an empty shallow data
 *  (0 rows) is returned instead.
 */
data get_random_data(data d, int num)
{
    data r = {0};
    r.shallow = 1;
    r.X.cols = d.X.cols;
    r.y.cols = d.y.cols;
    if(d.X.rows <= 0 || num <= 0){
        return r;   // nothing to sample from / nothing requested
    }
    r.X.rows = num;
    r.y.rows = num;
    r.X.vals = calloc(num, sizeof(float *));
    r.y.vals = calloc(num, sizeof(float *));
    int i;
    for(i = 0; i < num; ++i){
        int index = rand()%d.X.rows;
        r.X.vals[i] = d.X.vals[index];
        r.y.vals[i] = d.y.vals[index];
    }
    return r;
}
/** Split d into two shallow views returned as a 2-element array:
 *  split[1] (test) holds rows [part*rows/total, (part+1)*rows/total)
 *  and split[0] (train) holds all remaining rows in their original
 *  order. Row pointers alias d; the caller frees the returned array
 *  and the vals arrays, but not the rows themselves (shallow flag). */
data *split_data(data d, int part, int total)
{
    int begin = part*d.X.rows/total;
    int finish = (part+1)*d.X.rows/total;
    int ntest = finish - begin;
    data train;
    data test;
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = ntest;
    train.X.rows = train.y.rows = d.X.rows - ntest;
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;
    train.X.vals = calloc(train.X.rows, sizeof(float*));
    train.y.vals = calloc(train.y.rows, sizeof(float*));
    test.X.vals = calloc(test.X.rows, sizeof(float*));
    test.y.vals = calloc(test.y.rows, sizeof(float*));
    // Single pass: rows inside [begin, finish) go to test, the rest
    // to train, preserving relative order in both.
    int src;
    int dst = 0;
    for(src = 0; src < d.X.rows; ++src){
        if(src >= begin && src < finish){
            test.X.vals[src - begin] = d.X.vals[src];
            test.y.vals[src - begin] = d.y.vals[src];
        } else {
            train.X.vals[dst] = d.X.vals[src];
            train.y.vals[dst] = d.y.vals[src];
            ++dst;
        }
    }
    data *split = calloc(2, sizeof(data));
    split[0] = train;
    split[1] = test;
    return split;
}
|
ofmo-ifc4c.c | /**
* @file ofmo-ifc4c.c 同じタイプの4中心クーロン積分を行う関数群
* */
/**
* @defgroup integ-ifc4c 4中心クーロン積分を行う関数群
*
* 同じタイプの4中心クーロン積分を行い、環境ポテンシャル項に加算する
* 関数群。
*
* すべての関数が同じ引数をもっているので、以下にその内容を示す。
*
* @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数
* @param[in] workerid 各ワーカプロセス(スレッド)のID
* (\f$ 0\le\tt{workerid}<\tt{nworkers} \f$)
* @param[in] La 1つ目の軌道量子数
* @param[in] Lb 2つ目の軌道量子数(\f$ \tt{La} \ge \tt{Lb} \f$)
* @param[in] Lc 3つ目の軌道量子数
* @param[in] Ld 4つ目の軌道量子数(\f$ \tt{Lc} \ge \tt{Ld} \f$)
* @param[in] shel_atm_frg[ics] フラグメントのCS番号 \c ics のCSが属する
* 原子の番号
* @param[in] shel_ini_frg[ics] フラグメントのCS番号 \c ics のCSに
* 含まれるAOの先頭AO番号
* @param[in] atom_x_frg[iat] フラグメントの原子の番号 \c iat のx座標
* (au単位)
* @param[in] atom_y_frg[iat] フラグメントの原子の番号 \c iat のy座標
* (au単位)
* @param[in] atom_z_frg[iat] フラグメントの原子の番号 \c iat のz座標
* (au単位)
* @param[in] leading_cs_pair_frg[itype] フラグメントのCSペアタイプ番号
* \c itype の先頭CSペア番号
* @param[in] csp_schwarz_frg[icsp] フラグメントのCSペア番号 \c icsp の
* Schwarz積分の値
* @param[in] csp_ics_frg[icsp] フラグメントのCSペア番号 \c icsp の
* 1つ目のCS番号
* @param[in] csp_jcs_frg[icsp] フラグメントのCSペア番号 \c icsp の
* 2つめのCS番号。ただし、
* \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。
* @param[in] csp_leading_ps_pair_frg[icsp] フラグメントのCSペア番号
* \c icsp に含まれるPSペアの先頭PSペア番号
* @param[in] psp_zeta_frg[ipsp] フラグメントのPSペア番号 \c ipsp の
* 軌道指数和\f$ \zeta = \zeta_a + \zeta_b \f$
* @param[in] psp_dkps_frg[ipsp] フラグメントのPSペア番号 \c ipsp
* の線型結合定数
* \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b}
* \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b}
* ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2
* \right]\f]
* @param[in] psp_xiza_frg[ipsp] フラグメントのPSペア番号 \c ipsp の
* \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$
*
* @param[in] shel_atm_mon[ics] 相手モノマーのCS番号 \c ics のCSが属する
* 原子の番号
* @param[in] shel_ini_mon[ics] 相手モノマーのCS番号 \c ics のCSに
* 含まれるAOの先頭AO番号
* @param[in] atom_x_mon[iat] 相手モノマーの原子の番号 \c iat のx座標
* (au単位)
* @param[in] atom_y_mon[iat] 相手モノマーの原子の番号 \c iat のy座標
* (au単位)
* @param[in] atom_z_mon[iat] 相手モノマーの原子の番号 \c iat のz座標
* (au単位)
* @param[in] leading_cs_pair_mon[itype] 相手モノマーのCSペアタイプ番号
* \c itype の先頭CSペア番号
* @param[in] csp_schwarz_mon[icsp] 相手モノマーのCSペア番号 \c icsp の
* Schwarz積分の値
* @param[in] csp_ics_mon[icsp] 相手モノマーのCSペア番号 \c icsp の
* 1つ目のCS番号
* @param[in] csp_jcs_mon[icsp] 相手モノマーのCSペア番号 \c icsp の
* 2つめのCS番号。ただし、
* \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。
* @param[in] csp_leading_ps_pair_mon[icsp] 相手モノマーのCSペア番号
* \c icsp に含まれるPSペアの先頭PSペア番号
* @param[in] psp_zeta_mon[ipsp] 相手モノマーのPSペア番号 \c ipsp の
* 軌道指数和\f$ \zeta = \zeta_a + \zeta_b \f$
* @param[in] psp_dkps_mon[ipsp] 相手モノマーのPSペア番号 \c ipsp
* の線型結合定数
* \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b}
* \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b}
* ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2
* \right]\f]
* @param[in] psp_xiza_mon[ipsp] 相手モノマーのPSペア番号 \c ipsp の
* \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$
*
* @param[in] D_mon[] 相手モノマーの密度行列(正方行列形式)
*
* @param[out] V_frg[] この関数で計算した4中心クーロンポテンシャルが
* 加算された環境ポテンシャル項(圧縮U形式)
*
* @attention
* 引数のV_frg[]がスレッド毎に異なる領域をさしている場合には、
* そのままスレッド並列に対応できる。
*
* @ingroup integ-med
* */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define MAXPSPAIR 100
#define ZERO 0.e0
#define ONE 1.e0
#define TWO 2.e0
#define FOUR 4.e0
#define HALF 0.5e0
#define EPS_ERI 1.e-15
#define EPS_PS4 1.e-30
#include "ofmo-twoint-core.h"
/** Computes a batch of (ss,ss)-type four-center Coulomb potential terms
 * and accumulates them into the environment potential V_frg.
 * See the file-top comment block for the shared argument description.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ssss__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis and cutoff table data for fragment
        const int shel_atm_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[], const int leading_cs_pair_frg[],
        const double csp_schwarz_frg[],
        const int csp_ics_frg[], const int csp_jcs_frg[],
        const int csp_leading_ps_pair_frg[],
        const double psp_zeta_frg[], const double psp_dkps_frg[],
        const double psp_xiza_frg[],
        // basis and cutoff table data for monomer
        const int shel_atm_mon[], const int shel_ini_mon[],
        const double atom_x_mon[], const double atom_y_mon[],
        const double atom_z_mon[],
        const int leading_cs_pair_mon[],
        const double csp_schwarz_mon[],
        const int csp_ics_mon[], const int csp_jcs_mon[],
        const int csp_leading_ps_pair_mon[],
        const double psp_zeta_mon[], const double psp_dkps_mon[],
        const double psp_xiza_mon[],
        // density matrix of monomer
        const double D_mon[],
        // (output) Coulomb potential
        double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, jcs, jat, jao;
    int kcs, kat, kao, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    double SSSS[1];
    //
    int ixx, ncsp, dx, res, pos;
    Lab = La * (La+1)/2 + Lb;    // composite CS-pair type index for (La,Lb)
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        // Block distribution of this type's fragment CS-pair range.
        // The /128 chunking suggests a fixed pool of 128 workers is
        // assumed here -- TODO confirm against the caller.
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx = (ncsp>>7); // >>7 means /128
        res = (ncsp & 0x007f);// remainder of division by 128
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        // Cyclic distribution: this worker takes every nworkers-th CS pair.
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        IJ = ((iao*iao+iao)>>1) + jao;  // packed lower-triangular index of (iao,jao)
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            // Schwarz screening: skip batches with a negligible bound.
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao = shel_ini_mon[kcs];
            lao = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            KL = ((kao*kao+kao)>>1) + lao;
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_ssss__(
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA,
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC, AC, SSSS );
            // Off-diagonal density elements enter twice (K<->L symmetry).
            coe = (kao==lao? ONE : TWO );
            if ( fabs(SSSS[0]) > EPS_ERI ) {
                V_frg[IJ] += coe * D_mon[KL] * SSSS[0];
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_ssss_
/** Computes a batch of (ss,ps)-type four-center Coulomb potential terms
 * and accumulates them into the environment potential V_frg.
 * Evaluated internally as (ps|ss) with the monomer pair passed first.
 * See the file-top comment block for the shared argument description.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ssps__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis and cutoff table data for fragment
        const int shel_atm_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[], const int leading_cs_pair_frg[],
        const double csp_schwarz_frg[],
        const int csp_ics_frg[], const int csp_jcs_frg[],
        const int csp_leading_ps_pair_frg[],
        const double psp_zeta_frg[], const double psp_dkps_frg[],
        const double psp_xiza_frg[],
        // basis and cutoff table data for monomer
        const int shel_atm_mon[], const int shel_ini_mon[],
        const double atom_x_mon[], const double atom_y_mon[],
        const double atom_z_mon[],
        const int leading_cs_pair_mon[],
        const double csp_schwarz_mon[],
        const int csp_ics_mon[], const int csp_jcs_mon[],
        const int csp_leading_ps_pair_mon[],
        const double psp_zeta_mon[], const double psp_dkps_mon[],
        const double psp_xiza_mon[],
        // density matrix of monomer
        const double D_mon[],
        // (output) Coulomb potential
        double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, k;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
    double val_ab, val_cd;
    double SSPS[3];
    //
    int ixx, ncsp, dx, res, pos;
    Lab = La * (La+1)/2 + Lb;    // composite CS-pair type index for (La,Lb)
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        // Block distribution of this type's fragment CS-pair range.
        // The /128 chunking suggests a fixed pool of 128 workers is
        // assumed here -- TODO confirm against the caller.
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx = (ncsp>>7); // >>7 means /128
        res = (ncsp & 0x007f);// remainder of division by 128
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        // Cyclic distribution: this worker takes every nworkers-th CS pair.
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        IJ = ((iao*iao+iao)>>1) + jao;  // packed lower-triangular index of (iao,jao)
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            // Schwarz screening: skip batches with a negligible bound.
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                CA[i] = C[i] - A[i];
                DC[i] = D[i] - C[i];
            }
            // Reuse the (ps|ss) kernel with the monomer pair in the bra.
            twoint_core_psss__(
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC,
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA, CA, SSPS );
            for ( k=0, kao=kao0; k<3; k++, kao++ ) {
                KL = ((kao*kao+kao)>>1) + lao;
                if ( fabs(SSPS[k]) > EPS_ERI ) {
                    V_frg[IJ] += TWO * D_mon[KL] * SSPS[k];
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_ssps_
/** Computes a batch of (ss,pp)-type four-center Coulomb potential terms
 * and accumulates them into the environment potential V_frg.
 * Evaluated internally as (pp|ss) with the monomer pair passed first.
 * See the file-top comment block for the shared argument description.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_sspp__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis and cutoff table data for fragment
        const int shel_atm_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[], const int leading_cs_pair_frg[],
        const double csp_schwarz_frg[],
        const int csp_ics_frg[], const int csp_jcs_frg[],
        const int csp_leading_ps_pair_frg[],
        const double psp_zeta_frg[], const double psp_dkps_frg[],
        const double psp_xiza_frg[],
        // basis and cutoff table data for monomer
        const int shel_atm_mon[], const int shel_ini_mon[],
        const double atom_x_mon[], const double atom_y_mon[],
        const double atom_z_mon[],
        const int leading_cs_pair_mon[],
        const double csp_schwarz_mon[],
        const int csp_ics_mon[], const int csp_jcs_mon[],
        const int csp_leading_ps_pair_mon[],
        const double psp_zeta_mon[], const double psp_dkps_mon[],
        const double psp_xiza_mon[],
        // density matrix of monomer
        const double D_mon[],
        // (output) Coulomb potential
        double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, k, l, K2, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
    double val_ab, val_cd, coe;
    double SSPP[3*3];
    //
    int ixx, ncsp, dx, res, pos;
    Lab = La * (La+1)/2 + Lb;    // composite CS-pair type index for (La,Lb)
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        // Block distribution of this type's fragment CS-pair range.
        // The /128 chunking suggests a fixed pool of 128 workers is
        // assumed here -- TODO confirm against the caller.
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx = (ncsp>>7); // >>7 means /128
        res = (ncsp & 0x007f);// remainder of division by 128
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        // Cyclic distribution: this worker takes every nworkers-th CS pair.
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        IJ = ((iao*iao+iao)>>1) + jao;  // packed lower-triangular index of (iao,jao)
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            // Schwarz screening: skip batches with a negligible bound.
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao0 = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                CA[i] = C[i] - A[i];
                DC[i] = D[i] - C[i];
            }
            // Reuse the (pp|ss) kernel with the monomer pair in the bra.
            twoint_core_ppss__(
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC,
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA, CA, SSPP );
            for ( k=0, kao=kao0, ix=0; k<3; k++, kao++ ) {
                K2 = (kao*kao+kao)>>1;
                for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
                    // Only the lower triangle of the (kao,lao) block is stored.
                    if ( lao>kao ) continue;
                    KL = K2 + lao;
                    // Off-diagonal density elements enter twice (K<->L symmetry).
                    coe = (kao==lao? ONE : TWO );
                    if ( fabs(SSPP[ix]) > EPS_ERI ) {
                        V_frg[IJ] += coe * D_mon[KL] * SSPP[ix];
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_sspp_
/** Computes a batch of (ss,ds)-type four-center Coulomb potential terms
 * and accumulates them into the environment potential V_frg.
 * Evaluated internally as (ds|ss) with the monomer pair passed first.
 * See the file-top comment block for the shared argument description.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ssds__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis and cutoff table data for fragment
        const int shel_atm_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[], const int leading_cs_pair_frg[],
        const double csp_schwarz_frg[],
        const int csp_ics_frg[], const int csp_jcs_frg[],
        const int csp_leading_ps_pair_frg[],
        const double psp_zeta_frg[], const double psp_dkps_frg[],
        const double psp_xiza_frg[],
        // basis and cutoff table data for monomer
        const int shel_atm_mon[], const int shel_ini_mon[],
        const double atom_x_mon[], const double atom_y_mon[],
        const double atom_z_mon[],
        const int leading_cs_pair_mon[],
        const double csp_schwarz_mon[],
        const int csp_ics_mon[], const int csp_jcs_mon[],
        const int csp_leading_ps_pair_mon[],
        const double psp_zeta_mon[], const double psp_dkps_mon[],
        const double psp_xiza_mon[],
        // density matrix of monomer
        const double D_mon[],
        // (output) Coulomb potential
        double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, k;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
    double val_ab, val_cd;
    double SSDS[6];
    //
    int ixx, ncsp, dx, res, pos;
    Lab = La * (La+1)/2 + Lb;    // composite CS-pair type index for (La,Lb)
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        // Block distribution of this type's fragment CS-pair range.
        // The /128 chunking suggests a fixed pool of 128 workers is
        // assumed here -- TODO confirm against the caller.
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx = (ncsp>>7); // >>7 means /128
        res = (ncsp & 0x007f);// remainder of division by 128
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        // Cyclic distribution: this worker takes every nworkers-th CS pair.
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        IJ = ((iao*iao+iao)>>1) + jao;  // packed lower-triangular index of (iao,jao)
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            // Schwarz screening: skip batches with a negligible bound.
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                CA[i] = C[i] - A[i];
                DC[i] = D[i] - C[i];
            }
            // Reuse the (ds|ss) kernel with the monomer pair in the bra.
            twoint_core_dsss__(
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC,
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA, CA, SSDS );
            for ( k=0, kao=kao0; k<6; k++, kao++ ) {
                KL = ((kao*kao+kao)>>1) + lao;
                if ( fabs(SSDS[k]) > EPS_ERI ) {
                    V_frg[IJ] += TWO * D_mon[KL] * SSDS[k];
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_ssds_
/** Computes a batch of (ss,dp)-type four-center Coulomb potential terms
 * and accumulates them into the environment potential V_frg.
 * Evaluated internally as (dp|ss) with the monomer pair passed first.
 * See the file-top comment block for the shared argument description.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ssdp__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis and cutoff table data for fragment
        const int shel_atm_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[], const int leading_cs_pair_frg[],
        const double csp_schwarz_frg[],
        const int csp_ics_frg[], const int csp_jcs_frg[],
        const int csp_leading_ps_pair_frg[],
        const double psp_zeta_frg[], const double psp_dkps_frg[],
        const double psp_xiza_frg[],
        // basis and cutoff table data for monomer
        const int shel_atm_mon[], const int shel_ini_mon[],
        const double atom_x_mon[], const double atom_y_mon[],
        const double atom_z_mon[],
        const int leading_cs_pair_mon[],
        const double csp_schwarz_mon[],
        const int csp_ics_mon[], const int csp_jcs_mon[],
        const int csp_leading_ps_pair_mon[],
        const double psp_zeta_mon[], const double psp_dkps_mon[],
        const double psp_xiza_mon[],
        // density matrix of monomer
        const double D_mon[],
        // (output) Coulomb potential
        double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, k, l, K2, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
    double val_ab, val_cd;
    double SSDP[6*3];
    //
    int ixx, ncsp, dx, res, pos;
    Lab = La * (La+1)/2 + Lb;    // composite CS-pair type index for (La,Lb)
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        // Block distribution of this type's fragment CS-pair range.
        // The /128 chunking suggests a fixed pool of 128 workers is
        // assumed here -- TODO confirm against the caller.
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx = (ncsp>>7); // >>7 means /128
        res = (ncsp & 0x007f);// remainder of division by 128
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        // Cyclic distribution: this worker takes every nworkers-th CS pair.
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        IJ = ((iao*iao+iao)>>1) + jao;  // packed lower-triangular index of (iao,jao)
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            // Schwarz screening: skip batches with a negligible bound.
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao0 = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                CA[i] = C[i] - A[i];
                DC[i] = D[i] - C[i];
            }
            // Reuse the (dp|ss) kernel with the monomer pair in the bra.
            twoint_core_dpss__(
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC,
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA, CA, SSDP );
            for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
                K2 = (kao*kao+kao)>>1;
                for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
                    KL = K2 + lao;
                    if ( fabs(SSDP[ix]) > EPS_ERI ) {
                        V_frg[IJ] += TWO * D_mon[KL] * SSDP[ix];
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_ssdp_
/** Computes a batch of (ss,dd)-type four-center Coulomb potential terms
 * and accumulates them into the environment potential V_frg.
 * Evaluated internally as (dd|ss) with the monomer pair passed first.
 * See the file-top comment block for the shared argument description.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ssdd__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis and cutoff table data for fragment
        const int shel_atm_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[], const int leading_cs_pair_frg[],
        const double csp_schwarz_frg[],
        const int csp_ics_frg[], const int csp_jcs_frg[],
        const int csp_leading_ps_pair_frg[],
        const double psp_zeta_frg[], const double psp_dkps_frg[],
        const double psp_xiza_frg[],
        // basis and cutoff table data for monomer
        const int shel_atm_mon[], const int shel_ini_mon[],
        const double atom_x_mon[], const double atom_y_mon[],
        const double atom_z_mon[],
        const int leading_cs_pair_mon[],
        const double csp_schwarz_mon[],
        const int csp_ics_mon[], const int csp_jcs_mon[],
        const int csp_leading_ps_pair_mon[],
        const double psp_zeta_mon[], const double psp_dkps_mon[],
        const double psp_xiza_mon[],
        // density matrix of monomer
        const double D_mon[],
        // (output) Coulomb potential
        double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, k, l, K2, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
    double val_ab, val_cd, coe;
    double SSDD[6*6];
    //
    int ixx, ncsp, dx, res, pos;
    Lab = La * (La+1)/2 + Lb;    // composite CS-pair type index for (La,Lb)
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        // Block distribution of this type's fragment CS-pair range.
        // The /128 chunking suggests a fixed pool of 128 workers is
        // assumed here -- TODO confirm against the caller.
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx = (ncsp>>7); // >>7 means /128
        res = (ncsp & 0x007f);// remainder of division by 128
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        // Cyclic distribution: this worker takes every nworkers-th CS pair.
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        IJ = ((iao*iao+iao)>>1) + jao;  // packed lower-triangular index of (iao,jao)
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            // Schwarz screening: skip batches with a negligible bound.
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao0 = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                CA[i] = C[i] - A[i];
                DC[i] = D[i] - C[i];
            }
            // Reuse the (dd|ss) kernel with the monomer pair in the bra.
            twoint_core_ddss__(
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC,
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA, CA, SSDD );
            for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
                K2 = (kao*kao+kao)>>1;
                for ( l=0, lao=lao0; l<6; l++, lao++, ix++ ) {
                    // Only the lower triangle of the (kao,lao) block is stored.
                    if ( lao>kao ) continue;
                    KL = K2 + lao;
                    // Off-diagonal density elements enter twice (K<->L symmetry).
                    coe = (kao==lao? ONE : TWO );
                    if ( fabs(SSDD[ix]) > EPS_ERI ) {
                        V_frg[IJ] += coe * D_mon[KL] * SSDD[ix];
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_ssdd_
/** Computes a batch of (ps,ss)-type four-center Coulomb potential terms
 * and accumulates them into the environment potential V_frg.
 * See the file-top comment block for the shared argument description.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_psss__(
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc, const int *pLd,
        // basis and cutoff table data for fragment
        const int shel_atm_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[], const int leading_cs_pair_frg[],
        const double csp_schwarz_frg[],
        const int csp_ics_frg[], const int csp_jcs_frg[],
        const int csp_leading_ps_pair_frg[],
        const double psp_zeta_frg[], const double psp_dkps_frg[],
        const double psp_xiza_frg[],
        // basis and cutoff table data for monomer
        const int shel_atm_mon[], const int shel_ini_mon[],
        const double atom_x_mon[], const double atom_y_mon[],
        const double atom_z_mon[],
        const int leading_cs_pair_mon[],
        const double csp_schwarz_mon[],
        const int csp_ics_mon[], const int csp_jcs_mon[],
        const int csp_leading_ps_pair_mon[],
        const double psp_zeta_mon[], const double psp_dkps_mon[],
        const double psp_xiza_mon[],
        // density matrix of monomer
        const double D_mon[],
        // (output) Coulomb potential
        double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    double PSSS[3];
    //
    int ixx, ncsp, dx, res, pos;
    Lab = La * (La+1)/2 + Lb;    // composite CS-pair type index for (La,Lb)
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        // Block distribution of this type's fragment CS-pair range.
        // The /128 chunking suggests a fixed pool of 128 workers is
        // assumed here -- TODO confirm against the caller.
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx = (ncsp>>7); // >>7 means /128
        res = (ncsp & 0x007f);// remainder of division by 128
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        // Cyclic distribution: this worker takes every nworkers-th CS pair.
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao0 = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            // Schwarz screening: skip batches with a negligible bound.
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao = shel_ini_mon[kcs];
            lao = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            KL = ((kao*kao+kao)>>1) + lao;  // packed lower-triangular index of (kao,lao)
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_psss__(
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA,
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC, AC, PSSS );
            // Off-diagonal density elements enter twice (K<->L symmetry).
            coe = (kao==lao? ONE : TWO );
            for ( i=0, iao=iao0; i<3; i++, iao++ ) {
                IJ = ((iao*iao+iao)>>1) + jao;
                if ( fabs(PSSS[i]) > EPS_ERI ) {
                    V_frg[IJ] += coe * D_mon[KL] * PSSS[i];
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_psss_
/** Computes the (ps,ps)-type four-center Coulomb potential terms in batch.
 *
 * For every surviving fragment shell pair (ij) and monomer shell pair (kl),
 * accumulates 2 * (ij|kl) * D_mon[KL] into V_frg[IJ].  Both D_mon and V_frg
 * are lower-triangular packed matrices (index ((a*a+a)>>1)+b with a>=b).
 *
 * @ingroup integ-ifc4c
 * @return 0 (always)
 * */
int ofmo_ifc4c_psps__(
    // parallelization
    const int *pnworkers, const int *pworkerid,
    // integral type data
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    // basis and cutoff table data for fragment
    const int shel_atm_frg[], const int shel_ini_frg[],
    const double atom_x_frg[], const double atom_y_frg[],
    const double atom_z_frg[], const int leading_cs_pair_frg[],
    const double csp_schwarz_frg[],
    const int csp_ics_frg[], const int csp_jcs_frg[],
    const int csp_leading_ps_pair_frg[],
    const double psp_zeta_frg[], const double psp_dkps_frg[],
    const double psp_xiza_frg[],
    // basis and cutoff table data for monomer
    const int shel_atm_mon[], const int shel_ini_mon[],
    const double atom_x_mon[], const double atom_y_mon[],
    const double atom_z_mon[],
    const int leading_cs_pair_mon[],
    const double csp_schwarz_mon[],
    const int csp_ics_mon[], const int csp_jcs_mon[],
    const int csp_leading_ps_pair_mon[],
    const double psp_zeta_mon[], const double psp_dkps_mon[],
    const double psp_xiza_mon[],
    // density matrix of monomer
    const double D_mon[],
    // (output) Coulomb potential
    double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, k, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd;
    double PSPS[3*3];   /* integral batch laid out as [i(3 p)][k(3 p)] */
    //
    int ixx, ncsp, dx, res, pos;
    /* composite angular-momentum indices of the (a,b) / (c,d) pair types */
    Lab = La * (La+1)/2 + Lb;
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        /* Static block partitioning of the fragment shell-pair list.
         * NOTE(review): the shift is >>7, i.e. division by 128 (the
         * original comments claimed /32); this branch appears to assume
         * 128 workers in this mode -- confirm against callers. */
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx  = (ncsp>>7);       // ncsp / 128 (base chunk size)
        res = (ncsp & 0x007f); // ncsp % 128 (chunks getting one extra pair)
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        /* cyclic partitioning: this worker takes every nworkers-th pair */
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao0 = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            /* Schwarz screening: skip negligible (ij|kl) batches */
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_psps__(
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA,
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC, AC, PSPS );
            for ( i=0, iao=iao0, ix=0; i<3; i++, iao++ ) {
                /* IJ: packed lower-triangular index of (iao,jao) */
                IJ = ((iao*iao+iao)>>1) + jao;
                for ( k=0, kao=kao0; k<3; k++, kao++, ix++ ) {
                    KL = ((kao*kao+kao)>>1) + lao;
                    if ( fabs(PSPS[ix]) > EPS_ERI ) {
                        /* TWO: kao (p AO) and lao (s AO) never coincide, so
                         * the off-diagonal density element is counted twice */
                        V_frg[IJ] += TWO * D_mon[KL] * PSPS[ix];
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_psps_
/** Computes the (ps,pp)-type four-center Coulomb potential terms in batch.
 *
 * For every surviving fragment shell pair (ij) and monomer shell pair (kl),
 * accumulates coe * (ij|kl) * D_mon[KL] into V_frg[IJ], where coe is 1 for
 * diagonal (kao==lao) and 2 for off-diagonal density elements.  D_mon and
 * V_frg are lower-triangular packed matrices.
 *
 * @ingroup integ-ifc4c
 * @return 0 (always)
 * */
int ofmo_ifc4c_pspp__(
    // parallelization
    const int *pnworkers, const int *pworkerid,
    // integral type data
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    // basis and cutoff table data for fragment
    const int shel_atm_frg[], const int shel_ini_frg[],
    const double atom_x_frg[], const double atom_y_frg[],
    const double atom_z_frg[], const int leading_cs_pair_frg[],
    const double csp_schwarz_frg[],
    const int csp_ics_frg[], const int csp_jcs_frg[],
    const int csp_leading_ps_pair_frg[],
    const double psp_zeta_frg[], const double psp_dkps_frg[],
    const double psp_xiza_frg[],
    // basis and cutoff table data for monomer
    const int shel_atm_mon[], const int shel_ini_mon[],
    const double atom_x_mon[], const double atom_y_mon[],
    const double atom_z_mon[],
    const int leading_cs_pair_mon[],
    const double csp_schwarz_mon[],
    const int csp_ics_mon[], const int csp_jcs_mon[],
    const int csp_leading_ps_pair_mon[],
    const double psp_zeta_mon[], const double psp_dkps_mon[],
    const double psp_xiza_mon[],
    // density matrix of monomer
    const double D_mon[],
    // (output) Coulomb potential
    double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, k, l, K2, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
    double val_ab, val_cd, coe;
    double PSPP[3*3*3];   /* integral batch laid out as [k(3)][l(3)][i(3)] */
    //
    int ixx, ncsp, dx, res, pos;
    /* composite angular-momentum indices of the (a,b) / (c,d) pair types */
    Lab = La * (La+1)/2 + Lb;
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        /* Static block partitioning of the fragment shell-pair list.
         * NOTE(review): the shift is >>7, i.e. division by 128 (the
         * original comments claimed /32); this branch appears to assume
         * 128 workers in this mode -- confirm against callers. */
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx  = (ncsp>>7);       // ncsp / 128 (base chunk size)
        res = (ncsp & 0x007f); // ncsp % 128 (chunks getting one extra pair)
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        /* cyclic partitioning: this worker takes every nworkers-th pair */
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao0 = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            /* Schwarz screening: skip negligible (ij|kl) batches */
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao0 = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                CA[i] = C[i] - A[i];
                DC[i] = D[i] - C[i];
            }
            /* The monomer (pp) pair is passed first to the (pp|ps) core
             * routine, i.e. the bra/ket are swapped; hence CA = C - A. */
            twoint_core_ppps__(
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC,
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA, CA, PSPP );
            for ( k=0, kao=kao0, ix=0; k<3; k++, kao++ ) {
                K2 = (kao*kao+kao)>>1;
                for ( l=0, lao=lao0; l<3; l++, lao++ ) {
                    /* only lower triangle of (kao,lao); skip the 3 i-entries */
                    if ( lao>kao ) { ix+=3; continue; }
                    KL = K2 + lao;
                    /* diagonal density elements counted once, off-diagonal twice */
                    coe = (kao==lao? ONE : TWO );
                    for ( i=0, iao=iao0; i<3; i++, iao++, ix++ ) {
                        IJ = ((iao*iao+iao)>>1) + jao;
                        if ( fabs(PSPP[ix]) > EPS_ERI ) {
                            V_frg[IJ] += coe * D_mon[KL] * PSPP[ix];
                        }
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_pspp_
/** Computes the (ps,ds)-type four-center Coulomb potential terms in batch.
 *
 * For every surviving fragment shell pair (ij) and monomer shell pair (kl),
 * accumulates 2 * (ij|kl) * D_mon[KL] into V_frg[IJ].  D_mon and V_frg are
 * lower-triangular packed matrices.
 *
 * @ingroup integ-ifc4c
 * @return 0 (always)
 * */
int ofmo_ifc4c_psds__(
    // parallelization
    const int *pnworkers, const int *pworkerid,
    // integral type data
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    // basis and cutoff table data for fragment
    const int shel_atm_frg[], const int shel_ini_frg[],
    const double atom_x_frg[], const double atom_y_frg[],
    const double atom_z_frg[], const int leading_cs_pair_frg[],
    const double csp_schwarz_frg[],
    const int csp_ics_frg[], const int csp_jcs_frg[],
    const int csp_leading_ps_pair_frg[],
    const double psp_zeta_frg[], const double psp_dkps_frg[],
    const double psp_xiza_frg[],
    // basis and cutoff table data for monomer
    const int shel_atm_mon[], const int shel_ini_mon[],
    const double atom_x_mon[], const double atom_y_mon[],
    const double atom_z_mon[],
    const int leading_cs_pair_mon[],
    const double csp_schwarz_mon[],
    const int csp_ics_mon[], const int csp_jcs_mon[],
    const int csp_leading_ps_pair_mon[],
    const double psp_zeta_mon[], const double psp_dkps_mon[],
    const double psp_xiza_mon[],
    // density matrix of monomer
    const double D_mon[],
    // (output) Coulomb potential
    double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, k, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
    double val_ab, val_cd;
    double PSDS[3*6];   /* integral batch laid out as [k(6 d)][i(3 p)] */
    //
    int ixx, ncsp, dx, res, pos;
    /* composite angular-momentum indices of the (a,b) / (c,d) pair types */
    Lab = La * (La+1)/2 + Lb;
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        /* Static block partitioning of the fragment shell-pair list.
         * NOTE(review): the shift is >>7, i.e. division by 128 (the
         * original comments claimed /32); this branch appears to assume
         * 128 workers in this mode -- confirm against callers. */
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx  = (ncsp>>7);       // ncsp / 128 (base chunk size)
        res = (ncsp & 0x007f); // ncsp % 128 (chunks getting one extra pair)
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        /* cyclic partitioning: this worker takes every nworkers-th pair */
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao0 = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            /* Schwarz screening: skip negligible (ij|kl) batches */
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                CA[i] = C[i] - A[i];
                DC[i] = D[i] - C[i];
            }
            /* The monomer (ds) pair is passed first to the (ds|ps) core
             * routine, i.e. the bra/ket are swapped; hence CA = C - A. */
            twoint_core_dsps__(
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC,
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA, CA, PSDS );
            for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
                KL = ((kao*kao+kao)>>1) + lao;
                for ( i=0, iao=iao0; i<3; i++, iao++, ix++ ) {
                    IJ = ((iao*iao+iao)>>1) + jao;
                    if ( fabs(PSDS[ix]) > EPS_ERI ) {
                        /* TWO: kao (d AO) and lao (s AO) never coincide, so
                         * the off-diagonal density element is counted twice */
                        V_frg[IJ] += TWO * D_mon[KL] * PSDS[ix];
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_psds_
/** Computes the (ps,dp)-type four-center Coulomb potential terms in batch.
 *
 * For every surviving fragment shell pair (ij) and monomer shell pair (kl),
 * accumulates 2 * (ij|kl) * D_mon[KL] into V_frg[IJ].  D_mon and V_frg are
 * lower-triangular packed matrices.
 *
 * @ingroup integ-ifc4c
 * @return 0 (always)
 * */
int ofmo_ifc4c_psdp__(
    // parallelization
    const int *pnworkers, const int *pworkerid,
    // integral type data
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    // basis and cutoff table data for fragment
    const int shel_atm_frg[], const int shel_ini_frg[],
    const double atom_x_frg[], const double atom_y_frg[],
    const double atom_z_frg[], const int leading_cs_pair_frg[],
    const double csp_schwarz_frg[],
    const int csp_ics_frg[], const int csp_jcs_frg[],
    const int csp_leading_ps_pair_frg[],
    const double psp_zeta_frg[], const double psp_dkps_frg[],
    const double psp_xiza_frg[],
    // basis and cutoff table data for monomer
    const int shel_atm_mon[], const int shel_ini_mon[],
    const double atom_x_mon[], const double atom_y_mon[],
    const double atom_z_mon[],
    const int leading_cs_pair_mon[],
    const double csp_schwarz_mon[],
    const int csp_ics_mon[], const int csp_jcs_mon[],
    const int csp_leading_ps_pair_mon[],
    const double psp_zeta_mon[], const double psp_dkps_mon[],
    const double psp_xiza_mon[],
    // density matrix of monomer
    const double D_mon[],
    // (output) Coulomb potential
    double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, k, l, K2, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
    double val_ab, val_cd;
    double PSDP[3*6*3];   /* integral batch laid out as [k(6)][l(3)][i(3)] */
    //
    int ixx, ncsp, dx, res, pos;
    /* composite angular-momentum indices of the (a,b) / (c,d) pair types */
    Lab = La * (La+1)/2 + Lb;
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        /* Static block partitioning of the fragment shell-pair list.
         * NOTE(review): the shift is >>7, i.e. division by 128 (the
         * original comments claimed /32); this branch appears to assume
         * 128 workers in this mode -- confirm against callers. */
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx  = (ncsp>>7);       // ncsp / 128 (base chunk size)
        res = (ncsp & 0x007f); // ncsp % 128 (chunks getting one extra pair)
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        /* cyclic partitioning: this worker takes every nworkers-th pair */
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao0 = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            /* Schwarz screening: skip negligible (ij|kl) batches */
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao0 = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                CA[i] = C[i] - A[i];
                DC[i] = D[i] - C[i];
            }
            /* The monomer (dp) pair is passed first to the (dp|ps) core
             * routine, i.e. the bra/ket are swapped; hence CA = C - A. */
            twoint_core_dpps__(
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC,
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA, CA, PSDP );
            for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
                K2 = (kao*kao+kao)>>1;
                for ( l=0, lao=lao0; l<3; l++, lao++ ) {
                    KL = K2 + lao;
                    for ( i=0, iao=iao0; i<3; i++, iao++, ix++ ) {
                        IJ = ((iao*iao+iao)>>1) + jao;
                        if ( fabs(PSDP[ix]) > EPS_ERI ) {
                            /* TWO: kao (d AO) and lao (p AO) never coincide, so
                             * the off-diagonal density element is counted twice */
                            V_frg[IJ] += TWO * D_mon[KL] * PSDP[ix];
                        }
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_psdp_
/** Computes the (ps,dd)-type four-center Coulomb potential terms in batch.
 *
 * For every surviving fragment shell pair (ij) and monomer shell pair (kl),
 * accumulates coe * (ij|kl) * D_mon[KL] into V_frg[IJ], where coe is 1 for
 * diagonal (kao==lao) and 2 for off-diagonal density elements.  D_mon and
 * V_frg are lower-triangular packed matrices.
 *
 * @ingroup integ-ifc4c
 * @return 0 (always)
 * */
int ofmo_ifc4c_psdd__(
    // parallelization
    const int *pnworkers, const int *pworkerid,
    // integral type data
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    // basis and cutoff table data for fragment
    const int shel_atm_frg[], const int shel_ini_frg[],
    const double atom_x_frg[], const double atom_y_frg[],
    const double atom_z_frg[], const int leading_cs_pair_frg[],
    const double csp_schwarz_frg[],
    const int csp_ics_frg[], const int csp_jcs_frg[],
    const int csp_leading_ps_pair_frg[],
    const double psp_zeta_frg[], const double psp_dkps_frg[],
    const double psp_xiza_frg[],
    // basis and cutoff table data for monomer
    const int shel_atm_mon[], const int shel_ini_mon[],
    const double atom_x_mon[], const double atom_y_mon[],
    const double atom_z_mon[],
    const int leading_cs_pair_mon[],
    const double csp_schwarz_mon[],
    const int csp_ics_mon[], const int csp_jcs_mon[],
    const int csp_leading_ps_pair_mon[],
    const double psp_zeta_mon[], const double psp_dkps_mon[],
    const double psp_xiza_mon[],
    // density matrix of monomer
    const double D_mon[],
    // (output) Coulomb potential
    double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, k, l, K2, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
    double val_ab, val_cd, coe;
    double PSDD[3*6*6];   /* integral batch laid out as [k(6)][l(6)][i(3)] */
    //
    int ixx, ncsp, dx, res, pos;
    /* composite angular-momentum indices of the (a,b) / (c,d) pair types */
    Lab = La * (La+1)/2 + Lb;
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        /* Static block partitioning of the fragment shell-pair list.
         * NOTE(review): the shift is >>7, i.e. division by 128 (the
         * original comments claimed /32); this branch appears to assume
         * 128 workers in this mode -- confirm against callers. */
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx  = (ncsp>>7);       // ncsp / 128 (base chunk size)
        res = (ncsp & 0x007f); // ncsp % 128 (chunks getting one extra pair)
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        /* cyclic partitioning: this worker takes every nworkers-th pair */
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao0 = shel_ini_frg[ics];
        jao = shel_ini_frg[jcs];
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            /* Schwarz screening: skip negligible (ij|kl) batches */
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao0 = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                CA[i] = C[i] - A[i];
                DC[i] = D[i] - C[i];
            }
            /* The monomer (dd) pair is passed first to the (dd|ps) core
             * routine, i.e. the bra/ket are swapped; hence CA = C - A. */
            twoint_core_ddps__(
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC,
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA, CA, PSDD );
            for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
                K2 = (kao*kao+kao)>>1;
                for ( l=0, lao=lao0; l<6; l++, lao++ ) {
                    /* only lower triangle of (kao,lao); skip the 3 i-entries */
                    if ( lao>kao ) { ix+=3; continue; }
                    KL = K2 + lao;
                    /* diagonal density elements counted once, off-diagonal twice */
                    coe = (kao==lao? ONE : TWO );
                    for ( i=0, iao=iao0; i<3; i++, iao++, ix++ ) {
                        IJ = ((iao*iao+iao)>>1) + jao;
                        if ( fabs(PSDD[ix]) > EPS_ERI ) {
                            V_frg[IJ] += coe * D_mon[KL] * PSDD[ix];
                        }
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_psdd_
/** Computes the (pp,ss)-type four-center Coulomb potential terms in batch.
 *
 * For every surviving fragment shell pair (ij) and monomer shell pair (kl),
 * accumulates coe * (ij|kl) * D_mon[KL] into V_frg[IJ], where coe is 1 for
 * diagonal (kao==lao) and 2 for off-diagonal density elements.  D_mon and
 * V_frg are lower-triangular packed matrices.
 *
 * @ingroup integ-ifc4c
 * @return 0 (always)
 * */
int ofmo_ifc4c_ppss__(
    // parallelization
    const int *pnworkers, const int *pworkerid,
    // integral type data
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    // basis and cutoff table data for fragment
    const int shel_atm_frg[], const int shel_ini_frg[],
    const double atom_x_frg[], const double atom_y_frg[],
    const double atom_z_frg[], const int leading_cs_pair_frg[],
    const double csp_schwarz_frg[],
    const int csp_ics_frg[], const int csp_jcs_frg[],
    const int csp_leading_ps_pair_frg[],
    const double psp_zeta_frg[], const double psp_dkps_frg[],
    const double psp_xiza_frg[],
    // basis and cutoff table data for monomer
    const int shel_atm_mon[], const int shel_ini_mon[],
    const double atom_x_mon[], const double atom_y_mon[],
    const double atom_z_mon[],
    const int leading_cs_pair_mon[],
    const double csp_schwarz_mon[],
    const int csp_ics_mon[], const int csp_jcs_mon[],
    const int csp_leading_ps_pair_mon[],
    const double psp_zeta_mon[], const double psp_dkps_mon[],
    const double psp_xiza_mon[],
    // density matrix of monomer
    const double D_mon[],
    // (output) Coulomb potential
    double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, j, I2, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    double PPSS[3*3];   /* integral batch laid out as [i(3 p)][j(3 p)] */
    //
    int ixx, ncsp, dx, res, pos;
    /* composite angular-momentum indices of the (a,b) / (c,d) pair types */
    Lab = La * (La+1)/2 + Lb;
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        /* Static block partitioning of the fragment shell-pair list.
         * NOTE(review): the shift is >>7, i.e. division by 128 (the
         * original comments claimed /32); this branch appears to assume
         * 128 workers in this mode -- confirm against callers. */
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx  = (ncsp>>7);       // ncsp / 128 (base chunk size)
        res = (ncsp & 0x007f); // ncsp % 128 (chunks getting one extra pair)
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        /* cyclic partitioning: this worker takes every nworkers-th pair */
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao0 = shel_ini_frg[ics];
        jao0 = shel_ini_frg[jcs];
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            /* Schwarz screening: skip negligible (ij|kl) batches */
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao = shel_ini_mon[kcs];
            lao = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            /* KL: packed lower-triangular index of the single (kao,lao) pair */
            KL = ((kao*kao+kao)>>1) + lao;
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_ppss__(
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA,
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC, AC, PPSS );
            /* diagonal density elements counted once, off-diagonal twice */
            coe = (kao==lao? ONE : TWO );
            for ( i=0, iao=iao0, ix=0; i<3; i++, iao++ ) {
                I2 = (iao*iao+iao)>>1;
                for ( j=0, jao=jao0; j<3; j++, jao++, ix++ ) {
                    /* only lower triangle of (iao,jao); ix still advances */
                    if ( jao>iao ) continue;
                    IJ = I2 + jao;
                    if ( fabs(PPSS[ix]) > EPS_ERI ) {
                        V_frg[IJ] += coe * D_mon[KL] * PPSS[ix];
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_ppss_
/** Computes the (pp,ps)-type four-center Coulomb potential terms in batch.
 *
 * For every surviving fragment shell pair (ij) and monomer shell pair (kl),
 * accumulates 2 * (ij|kl) * D_mon[KL] into V_frg[IJ].  D_mon and V_frg are
 * lower-triangular packed matrices.
 *
 * @ingroup integ-ifc4c
 * @return 0 (always)
 * */
int ofmo_ifc4c_ppps__(
    // parallelization
    const int *pnworkers, const int *pworkerid,
    // integral type data
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    // basis and cutoff table data for fragment
    const int shel_atm_frg[], const int shel_ini_frg[],
    const double atom_x_frg[], const double atom_y_frg[],
    const double atom_z_frg[], const int leading_cs_pair_frg[],
    const double csp_schwarz_frg[],
    const int csp_ics_frg[], const int csp_jcs_frg[],
    const int csp_leading_ps_pair_frg[],
    const double psp_zeta_frg[], const double psp_dkps_frg[],
    const double psp_xiza_frg[],
    // basis and cutoff table data for monomer
    const int shel_atm_mon[], const int shel_ini_mon[],
    const double atom_x_mon[], const double atom_y_mon[],
    const double atom_z_mon[],
    const int leading_cs_pair_mon[],
    const double csp_schwarz_mon[],
    const int csp_ics_mon[], const int csp_jcs_mon[],
    const int csp_leading_ps_pair_mon[],
    const double psp_zeta_mon[], const double psp_dkps_mon[],
    const double psp_xiza_mon[],
    // density matrix of monomer
    const double D_mon[],
    // (output) Coulomb potential
    double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, j, k, I2, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd;
    double PPPS[3*3*3];   /* integral batch laid out as [i(3)][j(3)][k(3)] */
    //
    int ixx, ncsp, dx, res, pos;
    /* composite angular-momentum indices of the (a,b) / (c,d) pair types */
    Lab = La * (La+1)/2 + Lb;
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        /* Static block partitioning of the fragment shell-pair list.
         * NOTE(review): the shift is >>7, i.e. division by 128 (the
         * original comments claimed /32); this branch appears to assume
         * 128 workers in this mode -- confirm against callers. */
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx  = (ncsp>>7);       // ncsp / 128 (base chunk size)
        res = (ncsp & 0x007f); // ncsp % 128 (chunks getting one extra pair)
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        /* cyclic partitioning: this worker takes every nworkers-th pair */
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao0 = shel_ini_frg[ics];
        jao0 = shel_ini_frg[jcs];
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            /* Schwarz screening: skip negligible (ij|kl) batches */
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_ppps__(
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA,
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC, AC, PPPS );
            for ( i=0, iao=iao0, ix=0; i<3; i++, iao++ ) {
                I2 = (iao*iao+iao)>>1;
                for ( j=0, jao=jao0; j<3; j++, jao++ ) {
                    /* only lower triangle of (iao,jao); skip the 3 k-entries */
                    if ( jao>iao ) { ix+=3; continue; }
                    IJ = I2 + jao;
                    for ( k=0, kao=kao0; k<3; k++, kao++, ix++ ) {
                        KL = ((kao*kao+kao)>>1) + lao;
                        if ( fabs(PPPS[ix]) > EPS_ERI ) {
                            /* TWO: kao (p AO) and lao (s AO) never coincide, so
                             * the off-diagonal density element is counted twice */
                            V_frg[IJ] += TWO * D_mon[KL] * PPPS[ix];
                        }
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_ppps_
/** Computes the (pp,pp)-type four-center Coulomb potential terms in batch.
 *
 * For every surviving fragment shell pair (ij) and monomer shell pair (kl),
 * accumulates coe * (ij|kl) * D_mon[KL] into V_frg[IJ], where coe is 1 for
 * diagonal (kao==lao) and 2 for off-diagonal density elements.  D_mon and
 * V_frg are lower-triangular packed matrices.
 *
 * @ingroup integ-ifc4c
 * @return 0 (always)
 * */
int ofmo_ifc4c_pppp__(
    // parallelization
    const int *pnworkers, const int *pworkerid,
    // integral type data
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    // basis and cutoff table data for fragment
    const int shel_atm_frg[], const int shel_ini_frg[],
    const double atom_x_frg[], const double atom_y_frg[],
    const double atom_z_frg[], const int leading_cs_pair_frg[],
    const double csp_schwarz_frg[],
    const int csp_ics_frg[], const int csp_jcs_frg[],
    const int csp_leading_ps_pair_frg[],
    const double psp_zeta_frg[], const double psp_dkps_frg[],
    const double psp_xiza_frg[],
    // basis and cutoff table data for monomer
    const int shel_atm_mon[], const int shel_ini_mon[],
    const double atom_x_mon[], const double atom_y_mon[],
    const double atom_z_mon[],
    const int leading_cs_pair_mon[],
    const double csp_schwarz_mon[],
    const int csp_ics_mon[], const int csp_jcs_mon[],
    const int csp_leading_ps_pair_mon[],
    const double psp_zeta_mon[], const double psp_dkps_mon[],
    const double psp_xiza_mon[],
    // density matrix of monomer
    const double D_mon[],
    // (output) Coulomb potential
    double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, j, k, l, I2, K2, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe;
    double PPPP[3*3*3*3];   /* integral batch laid out as [i][j][k][l] */
    //
    int ixx, ncsp, dx, res, pos;
    /* composite angular-momentum indices of the (a,b) / (c,d) pair types */
    Lab = La * (La+1)/2 + Lb;
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        /* Static block partitioning of the fragment shell-pair list.
         * NOTE(review): the shift is >>7, i.e. division by 128 (the
         * original comments claimed /32); this branch appears to assume
         * 128 workers in this mode -- confirm against callers. */
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx  = (ncsp>>7);       // ncsp / 128 (base chunk size)
        res = (ncsp & 0x007f); // ncsp % 128 (chunks getting one extra pair)
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        /* cyclic partitioning: this worker takes every nworkers-th pair */
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao0 = shel_ini_frg[ics];
        jao0 = shel_ini_frg[jcs];
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            /* Schwarz screening: skip negligible (ij|kl) batches */
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao0 = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            twoint_core_pppp__(
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA,
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC, AC, PPPP );
            for ( i=0, iao=iao0, ix=0; i<3; i++, iao++ ) {
                I2 = (iao*iao+iao)>>1;
                for ( j=0, jao=jao0; j<3; j++, jao++ ) {
                    /* only lower triangle of (iao,jao); skip the 9 (k,l)-entries */
                    if ( jao>iao ) { ix+=3*3; continue; }
                    IJ = I2 + jao;
                    for ( k=0, kao=kao0; k<3; k++, kao++ ) {
                        K2 = (kao*kao+kao)>>1;
                        for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
                            /* only lower triangle of (kao,lao); ix still advances */
                            if ( lao>kao ) continue;
                            /* diagonal density elements counted once, off-diagonal twice */
                            coe = (kao==lao? ONE : TWO );
                            KL = K2 + lao;
                            if ( fabs(PPPP[ix]) > EPS_ERI ) {
                                V_frg[IJ] += coe * D_mon[KL] * PPPP[ix];
                            }
                        }
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_pppp_
/** Computes the (pp,ds)-type four-center Coulomb potential terms in batch.
 *
 * For every surviving fragment shell pair (ij) and monomer shell pair (kl),
 * accumulates 2 * (ij|kl) * D_mon[KL] into V_frg[IJ].  D_mon and V_frg are
 * lower-triangular packed matrices.
 *
 * @ingroup integ-ifc4c
 * @return 0 (always)
 * */
int ofmo_ifc4c_ppds__(
    // parallelization
    const int *pnworkers, const int *pworkerid,
    // integral type data
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    // basis and cutoff table data for fragment
    const int shel_atm_frg[], const int shel_ini_frg[],
    const double atom_x_frg[], const double atom_y_frg[],
    const double atom_z_frg[], const int leading_cs_pair_frg[],
    const double csp_schwarz_frg[],
    const int csp_ics_frg[], const int csp_jcs_frg[],
    const int csp_leading_ps_pair_frg[],
    const double psp_zeta_frg[], const double psp_dkps_frg[],
    const double psp_xiza_frg[],
    // basis and cutoff table data for monomer
    const int shel_atm_mon[], const int shel_ini_mon[],
    const double atom_x_mon[], const double atom_y_mon[],
    const double atom_z_mon[],
    const int leading_cs_pair_mon[],
    const double csp_schwarz_mon[],
    const int csp_ics_mon[], const int csp_jcs_mon[],
    const int csp_leading_ps_pair_mon[],
    const double psp_zeta_mon[], const double psp_dkps_mon[],
    const double psp_xiza_mon[],
    // density matrix of monomer
    const double D_mon[],
    // (output) Coulomb potential
    double V_frg[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int Lab, Lcd, IJ, KL, i, j, k, I2, ix;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
    double val_ab, val_cd;
    double PPDS[3*3*6];   /* integral batch laid out as [k(6)][i(3)][j(3)] */
    //
    int ixx, ncsp, dx, res, pos;
    /* composite angular-momentum indices of the (a,b) / (c,d) pair types */
    Lab = La * (La+1)/2 + Lb;
    Lcd = Lc * (Lc+1)/2 + Ld;
    if ( nworkers < 0 ) {
        /* Static block partitioning of the fragment shell-pair list.
         * NOTE(review): the shift is >>7, i.e. division by 128 (the
         * original comments claimed /32); this branch appears to assume
         * 128 workers in this mode -- confirm against callers. */
        pos = workerid;
        ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
        dx  = (ncsp>>7);       // ncsp / 128 (base chunk size)
        res = (ncsp & 0x007f); // ncsp % 128 (chunks getting one extra pair)
        ijcs0 = leading_cs_pair_frg[Lab]
            + (pos<res ? pos*(dx+1) : pos*dx+res );
        ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
        ixx = 1;
    } else {
        /* cyclic partitioning: this worker takes every nworkers-th pair */
        ijcs0 = leading_cs_pair_frg[Lab] + workerid;
        ijcs1 = leading_cs_pair_frg[Lab+1];
        ixx = nworkers;
    }
    klcs0 = leading_cs_pair_mon[Lcd];
    klcs1 = leading_cs_pair_mon[Lcd+1];
    for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
        val_ab = csp_schwarz_frg[ijcs];
        ics = csp_ics_frg[ijcs];
        jcs = csp_jcs_frg[ijcs];
        ijps0 = csp_leading_ps_pair_frg[ijcs];
        nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
        iat = shel_atm_frg[ics];
        jat = shel_atm_frg[jcs];
        iao0 = shel_ini_frg[ics];
        jao0 = shel_ini_frg[jcs];
        A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
        B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        //#pragma omp for schedule(guided)
        for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
            val_cd = csp_schwarz_mon[klcs];
            /* Schwarz screening: skip negligible (ij|kl) batches */
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics_mon[klcs];
            lcs = csp_jcs_mon[klcs];
            klps0 = csp_leading_ps_pair_mon[klcs];
            nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
            kat = shel_atm_mon[kcs];
            lat = shel_atm_mon[lcs];
            kao0 = shel_ini_mon[kcs];
            lao = shel_ini_mon[lcs];
            C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
            D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
            for ( i=0; i<3; i++ ) {
                CA[i] = C[i] - A[i];
                DC[i] = D[i] - C[i];
            }
            /* The monomer (ds) pair is passed first to the (ds|pp) core
             * routine, i.e. the bra/ket are swapped; hence CA = C - A. */
            twoint_core_dspp__(
                &nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
                &psp_xiza_mon[klps0], DC,
                &nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
                &psp_xiza_frg[ijps0], BA, CA, PPDS );
            for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
                KL = ((kao*kao+kao)>>1) + lao;
                for ( i=0, iao=iao0; i<3; i++, iao++ ) {
                    I2 = (iao*iao+iao)>>1;
                    for ( j=0, jao=jao0; j<3; j++, jao++, ix++ ) {
                        /* only lower triangle of (iao,jao); ix still advances */
                        if ( jao>iao ) continue;
                        IJ = I2 + jao;
                        if ( fabs(PPDS[ix]) > EPS_ERI ) {
                            /* TWO: kao (d AO) and lao (s AO) never coincide, so
                             * the off-diagonal density element is counted twice */
                            V_frg[IJ] += TWO * D_mon[KL] * PPDS[ix];
                        }
                    }
                }
            }
        } // klcs
    } // ijcs
    return 0;
} // end of ofmo_ifc4c_ppds_
/** Batch computation of the (pp,dp)-type four-center Coulomb potential terms.
 *
 * Contracts two-electron integrals (ij|kl) — ij a fragment pp shell pair,
 * kl a monomer dp shell pair — with the monomer density matrix D_mon and
 * accumulates the result into the fragment Coulomb potential V_frg.
 * Both D_mon and V_frg use lower-triangle packed storage indexed as
 * (p*p+p)/2 + q with p >= q.  Always returns 0.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ppdp__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, l, I2, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
double val_ab, val_cd;
double PPDP[3*3*6*3];  // integral buffer, index order (k,l,i,j) = (6,3,3,3)
// worker decomposition of the fragment ij shell-pair range
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;  // composite angular-momentum index of the bra pair
Lcd = Lc * (Lc+1)/2 + Ld;  // composite angular-momentum index of the ket pair
if ( nworkers < 0 ) {
// nworkers < 0: each worker gets one contiguous block of shell pairs
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // quotient of ncsp/128 (block size per worker)
res = (ncsp & 0x007f);// remainder of ncsp/128; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// nworkers >= 0: round-robin over shell pairs with stride nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue;  // Schwarz screening of the quartet
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
CA[i] = C[i] - A[i];
DC[i] = D[i] - C[i];
}
// NOTE(review): the monomer (dp) pair is passed as the first primitive-pair
// block of the core routine, so PPDP comes back ordered (k,l,i,j).
twoint_core_dppp__(
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC,
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA, CA, PPDP );
for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;  // triangular row offset for AO kao
for ( l=0, lao=lao0; l<3; l++, lao++ ) {
KL = K2 + lao;
for ( i=0, iao=iao0; i<3; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<3; j++, jao++, ix++ ) {
if ( jao>iao ) continue;  // keep only the lower triangle of V_frg
IJ = I2 + jao;
if ( fabs(PPDP[ix]) > EPS_ERI ) {
// factor TWO: kl is off-diagonal in the packed density
V_frg[IJ] += TWO * D_mon[KL] * PPDP[ix];
}
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_ppdp_
/** Batch computation of the (pp,dd)-type four-center Coulomb potential terms.
 *
 * Contracts two-electron integrals (ij|kl) — ij a fragment pp shell pair,
 * kl a monomer dd shell pair — with the monomer density matrix D_mon and
 * accumulates into the fragment Coulomb potential V_frg (lower-triangle
 * packed storage).  Always returns 0.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ppdd__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, l, I2, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
double val_ab, val_cd, coe;
double PPDD[3*3*6*6];  // integral buffer, index order (k,l,i,j) = (6,6,3,3)
// worker decomposition of the fragment ij shell-pair range
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
// nworkers < 0: contiguous block distribution over the shell-pair list
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // quotient of ncsp/128 (block size per worker)
res = (ncsp & 0x007f);// remainder of ncsp/128; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// nworkers >= 0: round-robin with stride nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue;  // Schwarz screening
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
CA[i] = C[i] - A[i];
DC[i] = D[i] - C[i];
}
// NOTE(review): monomer (dd) pair is passed as the first primitive-pair
// block of the core routine, hence the (k,l,i,j) layout of PPDD.
twoint_core_ddpp__(
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC,
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA, CA, PPDD );
for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<6; l++, lao++ ) {
// skip upper-triangle kl blocks; advance ix past the 3x3 ij entries
if ( lao>kao ) { ix+=3*3; continue; }
KL = K2 + lao;
coe = (kao==lao? ONE : TWO );  // diagonal kl counted once, off-diagonal twice
for ( i=0, iao=iao0; i<3; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<3; j++, jao++, ix++ ) {
if ( jao>iao ) continue;  // lower triangle of V_frg only
IJ = I2 + jao;
if ( fabs(PPDD[ix]) > EPS_ERI ) {
V_frg[IJ] += coe * D_mon[KL] * PPDD[ix];
}
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_ppdd_
/** Batch computation of the (ds,ss)-type four-center Coulomb potential terms.
 *
 * Contracts two-electron integrals (ij|kl) — ij a fragment ds shell pair,
 * kl a monomer ss shell pair — with the monomer density matrix D_mon and
 * accumulates into the fragment Coulomb potential V_frg (lower-triangle
 * packed storage).  Always returns 0.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dsss__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao;
int kcs, kat, kao, lcs, lat, lao;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe;
double DSSS[6];  // one entry per d AO of the bra; ket is a single (s,s) pair
// worker decomposition of the fragment ij shell-pair range
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
// nworkers < 0: contiguous block distribution over the shell-pair list
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // quotient of ncsp/128 (block size per worker)
res = (ncsp & 0x007f);// remainder of ncsp/128; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// nworkers >= 0: round-robin with stride nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao = shel_ini_frg[jcs];  // s shell: single AO, fixed for the whole pair
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue;  // Schwarz screening
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao = shel_ini_mon[kcs];
lao = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
KL = ((kao*kao+kao)>>1) + lao;  // packed triangular index of the kl density element
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dsss__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DSSS );
coe = (kao==lao? ONE : TWO );  // both s shells: kl may be a diagonal density element
for ( i=0, iao=iao0; i<6; i++, iao++ ) {
IJ = ((iao*iao+iao)>>1) + jao;
if ( fabs(DSSS[i]) > EPS_ERI ) {
V_frg[IJ] += coe * D_mon[KL] * DSSS[i];
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dsss_
/** Batch computation of the (ds,ps)-type four-center Coulomb potential terms.
 *
 * Contracts two-electron integrals (ij|kl) — ij a fragment ds shell pair,
 * kl a monomer ps shell pair — with the monomer density matrix D_mon and
 * accumulates into the fragment Coulomb potential V_frg (lower-triangle
 * packed storage).  Always returns 0.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dsps__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, k, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao;
int kcs, kat, kao, kao0, lcs, lat, lao;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd;
double DSPS[6*3];  // integral buffer, index order (i,k) = (6 d AOs, 3 p AOs)
// worker decomposition of the fragment ij shell-pair range
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
// nworkers < 0: contiguous block distribution over the shell-pair list
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // quotient of ncsp/128 (block size per worker)
res = (ncsp & 0x007f);// remainder of ncsp/128; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// nworkers >= 0: round-robin with stride nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao = shel_ini_frg[jcs];  // s shell: single AO, fixed
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue;  // Schwarz screening
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dsps__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DSPS );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
IJ = ((iao*iao+iao)>>1) + jao;
for ( k=0, kao=kao0; k<3; k++, kao++, ix++ ) {
KL = ((kao*kao+kao)>>1) + lao;
if ( fabs(DSPS[ix]) > EPS_ERI ) {
// k (p shell) and l (s shell) are distinct AOs, so kl is always
// off-diagonal in the packed density: factor TWO unconditionally
V_frg[IJ] += TWO * D_mon[KL] * DSPS[ix];
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dsps_
/** Batch computation of the (ds,pp)-type four-center Coulomb potential terms.
 *
 * Contracts two-electron integrals (ij|kl) — ij a fragment ds shell pair,
 * kl a monomer pp shell pair — with the monomer density matrix D_mon and
 * accumulates into the fragment Coulomb potential V_frg (lower-triangle
 * packed storage).  Always returns 0.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dspp__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, k, l, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe;
double DSPP[6*3*3];  // integral buffer, index order (i,k,l) = (6,3,3)
// worker decomposition of the fragment ij shell-pair range
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
// nworkers < 0: contiguous block distribution over the shell-pair list
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // quotient of ncsp/128 (block size per worker)
res = (ncsp & 0x007f);// remainder of ncsp/128; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// nworkers >= 0: round-robin with stride nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao = shel_ini_frg[jcs];  // s shell: single AO, fixed
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue;  // Schwarz screening
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dspp__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DSPP );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
IJ = ((iao*iao+iao)>>1) + jao;
for ( k=0, kao=kao0; k<3; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
// ix advances in the loop header, so skipped (upper-triangle)
// entries still consume their slot in DSPP
for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
if ( lao>kao ) continue;
coe = (kao==lao? ONE : TWO );  // diagonal kl once, off-diagonal twice
KL = K2 + lao;
if ( fabs(DSPP[ix]) > EPS_ERI ) {
V_frg[IJ] += coe * D_mon[KL] * DSPP[ix];
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dspp_
/** Batch computation of the (ds,ds)-type four-center Coulomb potential terms.
 *
 * Contracts two-electron integrals (ij|kl) — ij a fragment ds shell pair,
 * kl a monomer ds shell pair — with the monomer density matrix D_mon and
 * accumulates into the fragment Coulomb potential V_frg (lower-triangle
 * packed storage).  Always returns 0.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dsds__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, k, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao;
int kcs, kat, kao, kao0, lcs, lat, lao;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd;
double DSDS[6*6];  // integral buffer, index order (i,k) = (6 d AOs, 6 d AOs)
// worker decomposition of the fragment ij shell-pair range
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
// nworkers < 0: contiguous block distribution over the shell-pair list
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // quotient of ncsp/128 (block size per worker)
res = (ncsp & 0x007f);// remainder of ncsp/128; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// nworkers >= 0: round-robin with stride nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao = shel_ini_frg[jcs];  // s shell: single AO, fixed
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue;  // Schwarz screening
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao = shel_ini_mon[lcs];  // s shell: single AO, fixed
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dsds__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DSDS );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
IJ = ((iao*iao+iao)>>1) + jao;
for ( k=0, kao=kao0; k<6; k++, kao++, ix++ ) {
KL = ((kao*kao+kao)>>1) + lao;
if ( fabs(DSDS[ix]) > EPS_ERI ) {
// k (d shell) and l (s shell) are distinct AOs, so kl is always
// off-diagonal in the packed density: factor TWO unconditionally
V_frg[IJ] += TWO * D_mon[KL] * DSDS[ix];
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dsds_
/** Batch computation of the (ds,dp)-type four-center Coulomb potential terms.
 *
 * Contracts two-electron integrals (ij|kl) — ij a fragment ds shell pair,
 * kl a monomer dp shell pair — with the monomer density matrix D_mon and
 * accumulates into the fragment Coulomb potential V_frg (lower-triangle
 * packed storage).  Always returns 0.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dsdp__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, k, l, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
double val_ab, val_cd;
double DSDP[6*6*3];  // integral buffer, index order (k,l,i) = (6,3,6)
// worker decomposition of the fragment ij shell-pair range
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
// nworkers < 0: contiguous block distribution over the shell-pair list
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // quotient of ncsp/128 (block size per worker)
res = (ncsp & 0x007f);// remainder of ncsp/128; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// nworkers >= 0: round-robin with stride nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao = shel_ini_frg[jcs];  // s shell: single AO, fixed
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue;  // Schwarz screening
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
CA[i] = C[i] - A[i];
DC[i] = D[i] - C[i];
}
// NOTE(review): the monomer (dp) pair is passed as the first
// primitive-pair block here, hence the (k,l,i) layout of DSDP.
twoint_core_dpds__(
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC,
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA, CA, DSDP );
for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<3; l++, lao++ ) {
KL = K2 + lao;
for ( i=0, iao=iao0; i<6; i++, iao++, ix++ ) {
IJ = ((iao*iao+iao)>>1) + jao;
if ( fabs(DSDP[ix]) > EPS_ERI ) {
// k (d shell) and l (p shell) are distinct AOs, so kl is
// always off-diagonal: factor TWO unconditionally
V_frg[IJ] += TWO * D_mon[KL] * DSDP[ix];
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dsdp_
/** Batch computation of the (ds,dd)-type four-center Coulomb potential terms.
 *
 * Contracts two-electron integrals (ij|kl) — ij a fragment ds shell pair,
 * kl a monomer dd shell pair — with the monomer density matrix D_mon and
 * accumulates into the fragment Coulomb potential V_frg (lower-triangle
 * packed storage).  Always returns 0.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dsdd__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, k, l, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
double val_ab, val_cd, coe;
double DSDD[6*6*6];  // integral buffer, index order (k,l,i) = (6,6,6)
// worker decomposition of the fragment ij shell-pair range
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
// nworkers < 0: contiguous block distribution over the shell-pair list
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // quotient of ncsp/128 (block size per worker)
res = (ncsp & 0x007f);// remainder of ncsp/128; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// nworkers >= 0: round-robin with stride nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao = shel_ini_frg[jcs];  // s shell: single AO, fixed
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue;  // Schwarz screening
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
CA[i] = C[i] - A[i];
DC[i] = D[i] - C[i];
}
// NOTE(review): the monomer (dd) pair is passed as the first
// primitive-pair block here, hence the (k,l,i) layout of DSDD.
twoint_core_ddds__(
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC,
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA, CA, DSDD );
for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<6; l++, lao++ ) {
// skip upper-triangle kl; advance ix past the 6 i-entries
if ( lao>kao ) { ix+=6; continue; }
KL = K2 + lao;
coe = (kao==lao? ONE : TWO );  // diagonal kl once, off-diagonal twice
for ( i=0, iao=iao0; i<6; i++, iao++, ix++ ) {
IJ = ((iao*iao+iao)>>1) + jao;
if ( fabs(DSDD[ix]) > EPS_ERI ) {
V_frg[IJ] += coe * D_mon[KL] * DSDD[ix];
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dsdd_
/** Batch computation of the (dp,ss)-type four-center Coulomb potential terms.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dpss__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, I2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, lcs, lat, lao;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe;
double DPSS[6*3];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // >>5 means /32
res = (ncsp & 0x007f);// residues in division by 32
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue;
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao = shel_ini_mon[kcs];
lao = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
KL = ((kao*kao+kao)>>1) + lao;
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dpss__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DPSS );
coe = (kao==lao? ONE : TWO );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<3; j++, jao++, ix++ ) {
IJ = I2 + jao;
if ( fabs(DPSS[ix]) > EPS_ERI ) {
V_frg[IJ] += coe * D_mon[KL] * DPSS[ix];
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dpss_
/** Computes the (dp,ps)-type four-center Coulomb potential terms in a batch.
 *
 * For every surviving fragment (d,p) contracted-shell pair (ij) and monomer
 * (p,s) pair (kl), accumulates V_frg(ij) += 2 * D_mon(kl) * (ij|kl).
 * V_frg and D_mon are packed lower-triangular matrices.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dpps__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, I2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd;
double DPPS[6*3*3];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
/* nworkers < 0 selects contiguous block partitioning of the ijcs range;
 * otherwise each worker strides cyclically through the range. */
if ( nworkers < 0 ) {
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // ncsp/128: base chunk size per worker
res = (ncsp & 0x007f);// ncsp%128: remainder; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue; // Cauchy-Schwarz screening of the whole (ij|kl) batch
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dpps__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DPPS );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<3; j++, jao++ ) {
IJ = I2 + jao;
for ( k=0, kao=kao0; k<3; k++, kao++, ix++ ) {
KL = ((kao*kao+kao)>>1) + lao; // packed index kao*(kao+1)/2 + lao
if ( fabs(DPPS[ix]) > EPS_ERI ) {
/* factor TWO: kao (p) and lao (s) are distinct AOs, so the
 * density element is always off-diagonal */
V_frg[IJ] += TWO * D_mon[KL] * DPPS[ix];
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dpps_
/** Computes the (dp,pp)-type four-center Coulomb potential terms in a batch.
 *
 * For every surviving fragment (d,p) contracted-shell pair (ij) and monomer
 * (p,p) pair (kl), accumulates V_frg(ij) += coe * D_mon(kl) * (ij|kl),
 * restricted to the lower triangle lao <= kao of the (kl) AO pairs.
 * V_frg and D_mon are packed lower-triangular matrices.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dppp__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, l, I2, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe;
double DPPP[6*3*3*3];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
/* nworkers < 0 selects contiguous block partitioning of the ijcs range;
 * otherwise each worker strides cyclically through the range. */
if ( nworkers < 0 ) {
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // ncsp/128: base chunk size per worker
res = (ncsp & 0x007f);// ncsp%128: remainder; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue; // Cauchy-Schwarz screening of the whole (ij|kl) batch
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dppp__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DPPP );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<3; j++, jao++ ) {
IJ = I2 + jao;
for ( k=0, kao=kao0; k<3; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
/* upper-triangle (kl) entries are skipped, but ix still
 * advances via the loop increment to stay in sync with DPPP */
if ( lao>kao ) continue;
coe = (kao==lao? ONE : TWO ); // off-diagonal density elements are counted twice
KL = K2 + lao; // packed index kao*(kao+1)/2 + lao
if ( fabs(DPPP[ix]) > EPS_ERI ) {
V_frg[IJ] += coe * D_mon[KL] * DPPP[ix];
}
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dppp_
/** Computes the (dp,ds)-type four-center Coulomb potential terms in a batch.
 *
 * For every surviving fragment (d,p) contracted-shell pair (ij) and monomer
 * (d,s) pair (kl), accumulates V_frg(ij) += 2 * D_mon(kl) * (ij|kl).
 * V_frg and D_mon are packed lower-triangular matrices.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dpds__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, I2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd;
double DPDS[6*3*6];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
/* nworkers < 0 selects contiguous block partitioning of the ijcs range;
 * otherwise each worker strides cyclically through the range. */
if ( nworkers < 0 ) {
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // ncsp/128: base chunk size per worker
res = (ncsp & 0x007f);// ncsp%128: remainder; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue; // Cauchy-Schwarz screening of the whole (ij|kl) batch
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dpds__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DPDS );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<3; j++, jao++ ) {
IJ = I2 + jao;
for ( k=0, kao=kao0; k<6; k++, kao++, ix++ ) {
KL = ((kao*kao+kao)>>1) + lao; // packed index kao*(kao+1)/2 + lao
if ( fabs(DPDS[ix]) > EPS_ERI ) {
/* factor TWO: kao (d) and lao (s) are distinct AOs, so the
 * density element is always off-diagonal */
V_frg[IJ] += TWO * D_mon[KL] * DPDS[ix];
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dpds_
/** Computes the (dp,dp)-type four-center Coulomb potential terms in a batch.
 *
 * For every surviving fragment (d,p) contracted-shell pair (ij) and monomer
 * (d,p) pair (kl), accumulates V_frg(ij) += 2 * D_mon(kl) * (ij|kl).
 * V_frg and D_mon are packed lower-triangular matrices.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dpdp__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, l, I2, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd;
double DPDP[6*3*6*3];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
/* nworkers < 0 selects contiguous block partitioning of the ijcs range;
 * otherwise each worker strides cyclically through the range. */
if ( nworkers < 0 ) {
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // ncsp/128: base chunk size per worker
res = (ncsp & 0x007f);// ncsp%128: remainder; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue; // Cauchy-Schwarz screening of the whole (ij|kl) batch
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dpdp__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DPDP );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<3; j++, jao++ ) {
IJ = I2 + jao;
for ( k=0, kao=kao0; k<6; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
KL = K2 + lao; // packed index kao*(kao+1)/2 + lao
if ( fabs(DPDP[ix]) > EPS_ERI ) {
/* factor TWO: kao (d) and lao (p) are distinct AOs, so the
 * density element is always off-diagonal */
V_frg[IJ] += TWO * D_mon[KL] * DPDP[ix];
}
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dpdp_
/** Computes the (dp,dd)-type four-center Coulomb potential terms in a batch.
 *
 * For every surviving fragment (d,p) contracted-shell pair (ij) and monomer
 * (d,d) pair (kl), accumulates V_frg(ij) += coe * D_mon(kl) * (ij|kl),
 * restricted to the lower triangle lao <= kao of the (kl) AO pairs.
 * V_frg and D_mon are packed lower-triangular matrices.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dpdd__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, l, I2, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], CA[3];
double val_ab, val_cd, coe;
double DPDD[6*3*6*6];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
/* nworkers < 0 selects contiguous block partitioning of the ijcs range;
 * otherwise each worker strides cyclically through the range. */
if ( nworkers < 0 ) {
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // ncsp/128: base chunk size per worker
res = (ncsp & 0x007f);// ncsp%128: remainder; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue; // Cauchy-Schwarz screening of the whole (ij|kl) batch
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
CA[i] = C[i] - A[i];
DC[i] = D[i] - C[i];
}
/* NOTE(review): (dp,dd) is evaluated by calling the (dd,dp) kernel with
 * bra and ket swapped (monomer pair passed first), so the output DPDD is
 * ordered [k][l][i][j] — presumably because no dedicated (dp,dd) kernel
 * exists; the accumulation loops below follow that ordering. */
twoint_core_dddp__(
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC,
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA, CA, DPDD );
for ( k=0, kao=kao0, ix=0; k<6; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<6; l++, lao++ ) {
/* skip upper-triangle (kl) entries; advance ix past the
 * 6*3 = 18 untouched [i][j] elements to stay in sync */
if ( lao>kao ) { ix+=6*3; continue; }
KL = K2 + lao; // packed index kao*(kao+1)/2 + lao
coe = (kao==lao? ONE : TWO ); // off-diagonal density elements are counted twice
for ( i=0, iao=iao0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<3; j++, jao++, ix++ ) {
IJ = I2 + jao;
if ( fabs(DPDD[ix]) > EPS_ERI ) {
V_frg[IJ] += coe * D_mon[KL] * DPDD[ix];
}
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dpdd_
/** Computes the (dd,ss)-type four-center Coulomb potential terms in a batch.
 *
 * For every surviving fragment (d,d) contracted-shell pair (ij) and monomer
 * (s,s) pair (kl), accumulates V_frg(ij) += coe * D_mon(kl) * (ij|kl),
 * restricted to the lower triangle jao <= iao of the (ij) AO pairs.
 * V_frg and D_mon are packed lower-triangular matrices.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ddss__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, I2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, lcs, lat, lao;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe;
double DDSS[6*6];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
/* nworkers < 0 selects contiguous block partitioning of the ijcs range;
 * otherwise each worker strides cyclically through the range. */
if ( nworkers < 0 ) {
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // ncsp/128: base chunk size per worker
res = (ncsp & 0x007f);// ncsp%128: remainder; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue; // Cauchy-Schwarz screening of the whole (ij|kl) batch
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao = shel_ini_mon[kcs];
lao = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
KL = ((kao*kao+kao)>>1) + lao; // packed index kao*(kao+1)/2 + lao
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_ddss__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DDSS );
coe = (kao==lao? ONE : TWO ); // off-diagonal density elements are counted twice
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<6; j++, jao++, ix++ ) {
/* upper-triangle (ij) entries are skipped, but ix still
 * advances via the loop increment to stay in sync with DDSS */
if ( jao>iao ) continue;
IJ = I2 + jao;
if ( fabs(DDSS[ix]) > EPS_ERI ) {
V_frg[IJ] += coe * D_mon[KL] * DDSS[ix];
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_ddss_
/** Computes the (dd,ps)-type four-center Coulomb potential terms in a batch.
 *
 * For every surviving fragment (d,d) contracted-shell pair (ij) and monomer
 * (p,s) pair (kl), accumulates V_frg(ij) += 2 * D_mon(kl) * (ij|kl),
 * restricted to the lower triangle jao <= iao of the (ij) AO pairs.
 * V_frg and D_mon are packed lower-triangular matrices.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ddps__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, I2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd;
double DDPS[6*6*3];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
/* nworkers < 0 selects contiguous block partitioning of the ijcs range;
 * otherwise each worker strides cyclically through the range. */
if ( nworkers < 0 ) {
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // ncsp/128: base chunk size per worker
res = (ncsp & 0x007f);// ncsp%128: remainder; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue; // Cauchy-Schwarz screening of the whole (ij|kl) batch
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_ddps__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DDPS );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<6; j++, jao++ ) {
/* skip upper-triangle (ij) entries; advance ix past the
 * 3 untouched [k] elements to stay in sync with DDPS */
if ( jao>iao ) { ix+=3; continue; }
IJ = I2 + jao;
for ( k=0, kao=kao0; k<3; k++, kao++, ix++ ) {
KL = ((kao*kao+kao)>>1) + lao; // packed index kao*(kao+1)/2 + lao
if ( fabs(DDPS[ix]) > EPS_ERI ) {
/* factor TWO: kao (p) and lao (s) are distinct AOs, so the
 * density element is always off-diagonal */
V_frg[IJ] += TWO * D_mon[KL] * DDPS[ix];
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_ddps_
/** Computes the (dd,pp)-type four-center Coulomb potential terms in a batch.
 *
 * For every surviving fragment (d,d) contracted-shell pair (ij) and monomer
 * (p,p) pair (kl), accumulates V_frg(ij) += coe * D_mon(kl) * (ij|kl),
 * restricted to the lower triangles jao <= iao and lao <= kao.
 * V_frg and D_mon are packed lower-triangular matrices.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ddpp__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, l, I2, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe;
double DDPP[6*6*3*3];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
/* nworkers < 0 selects contiguous block partitioning of the ijcs range;
 * otherwise each worker strides cyclically through the range. */
if ( nworkers < 0 ) {
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // ncsp/128: base chunk size per worker
res = (ncsp & 0x007f);// ncsp%128: remainder; first `res` workers get one extra pair
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
if ( val_ab*val_cd < EPS_PS4 ) continue; // Cauchy-Schwarz screening of the whole (ij|kl) batch
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_ddpp__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DDPP );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<6; j++, jao++ ) {
/* skip upper-triangle (ij) entries; advance ix past the
 * 3*3 = 9 untouched [k][l] elements to stay in sync */
if ( jao>iao ) { ix+=3*3; continue; }
IJ = I2 + jao;
for ( k=0, kao=kao0; k<3; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
/* upper-triangle (kl) entries are skipped, but ix still
 * advances via the loop increment to stay in sync with DDPP */
if ( lao>kao ) continue;
coe = (kao==lao? ONE : TWO ); // off-diagonal density elements are counted twice
KL = K2 + lao; // packed index kao*(kao+1)/2 + lao
if ( fabs(DDPP[ix]) > EPS_ERI ) {
V_frg[IJ] += coe * D_mon[KL] * DDPP[ix];
}
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_ddpp_
/** Computes all (dd,ds)-type four-center Coulomb potential terms at once.
 * Contracts fragment-monomer two-electron integrals with the monomer
 * density matrix D_mon and accumulates the result into the
 * triangular-packed fragment potential matrix V_frg.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_ddds__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, I2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd;
double DDDS[6*6*6];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
// block partitioning: each worker gets one contiguous chunk of ijcs pairs
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // >>7 means /128 (base chunk size)
res = (ncsp & 0x007f);// residue in the division by 128
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// cyclic partitioning: worker starts at its id and strides by nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
// Schwarz screening: skip negligible shell quartets
if ( val_ab*val_cd < EPS_PS4 ) continue;
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao = shel_ini_mon[lcs]; // last shell contributes a single AO, so lao is fixed
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_ddds__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DDDS );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1; // triangular-row offset for iao
for ( j=0, jao=jao0; j<6; j++, jao++ ) {
// only the lower triangle of V_frg is stored; skip past the buffered values
if ( jao>iao ) { ix+=6; continue; }
IJ = I2 + jao;
for ( k=0, kao=kao0; k<6; k++, kao++, ix++ ) {
KL = ((kao*kao+kao)>>1) + lao;
if ( fabs(DDDS[ix]) > EPS_ERI ) {
V_frg[IJ] += TWO * D_mon[KL] * DDDS[ix];
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_ddds_
/** Computes all (dd,dp)-type four-center Coulomb potential terms at once.
 * Contracts fragment-monomer two-electron integrals with the monomer
 * density matrix D_mon and accumulates the result into the
 * triangular-packed fragment potential matrix V_frg.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dddp__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, l, I2, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd;
double DDDP[6*6*6*3];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
// block partitioning: each worker gets one contiguous chunk of ijcs pairs
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // >>7 means /128 (base chunk size)
res = (ncsp & 0x007f);// residue in the division by 128
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// cyclic partitioning: worker starts at its id and strides by nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
// Schwarz screening: skip negligible shell quartets
if ( val_ab*val_cd < EPS_PS4 ) continue;
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dddp__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DDDP );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1; // triangular-row offset for iao
for ( j=0, jao=jao0; j<6; j++, jao++ ) {
// only the lower triangle of V_frg is stored; skip past the buffered values
if ( jao>iao ) { ix+=6*3; continue; }
IJ = I2 + jao;
for ( k=0, kao=kao0; k<6; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<3; l++, lao++, ix++ ) {
KL = K2 + lao;
if ( fabs(DDDP[ix]) > EPS_ERI ) {
V_frg[IJ] += TWO * D_mon[KL] * DDDP[ix];
}
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dddp_
/** Computes all (dd,dd)-type four-center Coulomb potential terms at once.
 * Contracts fragment-monomer two-electron integrals with the monomer
 * density matrix D_mon and accumulates the result into the
 * triangular-packed fragment potential matrix V_frg.
 * @ingroup integ-ifc4c
 * */
int ofmo_ifc4c_dddd__(
// parallelization
const int *pnworkers, const int *pworkerid,
// integral type data
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
// basis and cutoff table data for fragment
const int shel_atm_frg[], const int shel_ini_frg[],
const double atom_x_frg[], const double atom_y_frg[],
const double atom_z_frg[], const int leading_cs_pair_frg[],
const double csp_schwarz_frg[],
const int csp_ics_frg[], const int csp_jcs_frg[],
const int csp_leading_ps_pair_frg[],
const double psp_zeta_frg[], const double psp_dkps_frg[],
const double psp_xiza_frg[],
// basis and cutoff table data for monomer
const int shel_atm_mon[], const int shel_ini_mon[],
const double atom_x_mon[], const double atom_y_mon[],
const double atom_z_mon[],
const int leading_cs_pair_mon[],
const double csp_schwarz_mon[],
const int csp_ics_mon[], const int csp_jcs_mon[],
const int csp_leading_ps_pair_mon[],
const double psp_zeta_mon[], const double psp_dkps_mon[],
const double psp_xiza_mon[],
// density matrix of monomer
const double D_mon[],
// (output) Coulomb potential
double V_frg[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int Lab, Lcd, IJ, KL, i, j, k, l, I2, K2, ix;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe;
double DDDD[6*6*6*6];
//
int ixx, ncsp, dx, res, pos;
Lab = La * (La+1)/2 + Lb;
Lcd = Lc * (Lc+1)/2 + Ld;
if ( nworkers < 0 ) {
// block partitioning: each worker gets one contiguous chunk of ijcs pairs
pos = workerid;
ncsp = leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab];
dx = (ncsp>>7); // >>7 means /128 (base chunk size)
res = (ncsp & 0x007f);// residue in the division by 128
ijcs0 = leading_cs_pair_frg[Lab]
+ (pos<res ? pos*(dx+1) : pos*dx+res );
ijcs1 = ijcs0 + ( pos<res ? dx+1 : dx );
ixx = 1;
} else {
// cyclic partitioning: worker starts at its id and strides by nworkers
ijcs0 = leading_cs_pair_frg[Lab] + workerid;
ijcs1 = leading_cs_pair_frg[Lab+1];
ixx = nworkers;
}
klcs0 = leading_cs_pair_mon[Lcd];
klcs1 = leading_cs_pair_mon[Lcd+1];
for ( ijcs=ijcs0; ijcs<ijcs1; ijcs+=ixx ) {
val_ab = csp_schwarz_frg[ijcs];
ics = csp_ics_frg[ijcs];
jcs = csp_jcs_frg[ijcs];
ijps0 = csp_leading_ps_pair_frg[ijcs];
nijps = csp_leading_ps_pair_frg[ijcs+1]-ijps0;
iat = shel_atm_frg[ics];
jat = shel_atm_frg[jcs];
iao0 = shel_ini_frg[ics];
jao0 = shel_ini_frg[jcs];
A[0]=atom_x_frg[iat]; A[1]=atom_y_frg[iat]; A[2]=atom_z_frg[iat];
B[0]=atom_x_frg[jat]; B[1]=atom_y_frg[jat]; B[2]=atom_z_frg[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
//#pragma omp for schedule(guided)
for ( klcs=klcs0; klcs<klcs1; klcs++ ) {
val_cd = csp_schwarz_mon[klcs];
// Schwarz screening: skip negligible shell quartets
if ( val_ab*val_cd < EPS_PS4 ) continue;
kcs = csp_ics_mon[klcs];
lcs = csp_jcs_mon[klcs];
klps0 = csp_leading_ps_pair_mon[klcs];
nklps = csp_leading_ps_pair_mon[klcs+1]-klps0;
kat = shel_atm_mon[kcs];
lat = shel_atm_mon[lcs];
kao0 = shel_ini_mon[kcs];
lao0 = shel_ini_mon[lcs];
C[0]=atom_x_mon[kat]; C[1]=atom_y_mon[kat]; C[2]=atom_z_mon[kat];
D[0]=atom_x_mon[lat]; D[1]=atom_y_mon[lat]; D[2]=atom_z_mon[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
twoint_core_dddd__(
&nijps, &psp_zeta_frg[ijps0], &psp_dkps_frg[ijps0],
&psp_xiza_frg[ijps0], BA,
&nklps, &psp_zeta_mon[klps0], &psp_dkps_mon[klps0],
&psp_xiza_mon[klps0], DC, AC, DDDD );
for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) {
I2 = (iao*iao+iao)>>1; // triangular-row offset for iao
for ( j=0, jao=jao0; j<6; j++, jao++ ) {
// only the lower triangle of V_frg is stored; skip past the buffered values
if ( jao>iao ) { ix+=6*6; continue; }
IJ = I2 + jao;
for ( k=0, kao=kao0; k<6; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<6; l++, lao++, ix++ ) {
if ( lao>kao ) continue;
// off-diagonal (kl) elements appear twice in the full sum
coe = (kao==lao? ONE : TWO );
KL = K2 + lao;
if ( fabs(DDDD[ix]) > EPS_ERI ) {
V_frg[IJ] += coe * D_mon[KL] * DDDD[ix];
}
}
}
}
}
} // klcs
} // ijcs
return 0;
} // end of ofmo_ifc4c_dddd_
|
omp_master_3.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
/* Verifies the OpenMP `master` construct: the master region must run
 * exactly once (nthreads == 1) and only on thread 0 (executing_thread == 0
 * and tid_result == 0). Returns 1 on success, 0 on failure. */
int test_omp_master_3()
{
int nthreads;          /* number of times the master region executed */
int executing_thread;  /* id of the thread that ran the master region */
int tid_result = 0; /* counts up the number of wrong thread no. for
the master thread. (Must be 0) */
nthreads = 0;
executing_thread = -1;
#pragma omp parallel
{
#pragma omp master
{
int tid = omp_get_thread_num();
/* the master region must be executed by thread 0 only */
if (tid != 0) {
#pragma omp critical
{ tid_result++; }
}
#pragma omp critical
{
nthreads++;
}
executing_thread = omp_get_thread_num ();
} /* end of master*/
} /* end of parallel*/
return ((nthreads == 1) && (executing_thread == 0) && (tid_result == 0));
}
/* Run the master-construct test REPETITIONS times; the exit status is
 * the number of failed repetitions (0 means success). */
int main()
{
    int failures = 0;
    int rep = 0;
    while (rep < REPETITIONS) {
        if (!test_omp_master_3()) {
            failures++;
        }
        rep++;
    }
    return failures;
}
|
omp_sections_nowait.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
/*
* This test will hang if the nowait is not working properly
*
* It relies on a thread skipping to the second sections construct to
* release the threads in the first sections construct
*
* Also, since scheduling of sections is implementation defined, it is
* necessary to have all four sections in the second sections construct
* release the threads since we can't guarantee which section a single thread
* will execute.
*/
volatile int release;
volatile int count;
/* Busy-waits until the global `release` flag is set by another thread,
 * then atomically increments the global `count`. `rank` is only used for
 * the diagnostic message. Relies on `release` being volatile. */
void wait_for_release_then_increment(int rank)
{
fprintf(stderr, "Thread nr %d enters first section"
" and waits.\n", rank);
while (release == 0);
#pragma omp atomic
count++;
}
/* Sets the global `release` flag (flushing it so waiting threads observe
 * the store) and atomically increments the global `count`. */
void release_and_increment(int rank)
{
fprintf(stderr, "Thread nr %d sets release to 1\n", rank);
release = 1;
#pragma omp flush(release)
#pragma omp atomic
count++;
}
/* Verifies `sections nowait`: threads stuck in the first sections
 * construct can only be released by a thread that skipped ahead (via
 * nowait) into the second sections construct. Hangs if nowait is broken.
 * Returns 1 when all eight sections executed. */
int test_omp_sections_nowait()
{
release = 0;
count = 0;
#pragma omp parallel num_threads(4)
{
int rank;
rank = omp_get_thread_num ();
#pragma omp sections nowait
{
#pragma omp section
{
wait_for_release_then_increment(rank);
}
#pragma omp section
{
wait_for_release_then_increment(rank);
}
#pragma omp section
{
wait_for_release_then_increment(rank);
}
#pragma omp section
{
/* this section does not block, so whichever thread gets it can
   fall through the nowait and reach the releasing construct */
fprintf(stderr, "Thread nr %d enters first sections and goes "
"immediately to next sections construct to release.\n", rank);
#pragma omp atomic
count++;
}
}
/* Begin of second sections environment */
/* every section releases, because which thread runs which section is
   implementation defined */
#pragma omp sections
{
#pragma omp section
{
release_and_increment(rank);
}
#pragma omp section
{
release_and_increment(rank);
}
#pragma omp section
{
release_and_increment(rank);
}
#pragma omp section
{
release_and_increment(rank);
}
}
}
// Check to make sure all eight sections were executed
return (count==8);
}
/* Run the sections-nowait test REPETITIONS times; the exit status is
 * the number of failed repetitions (0 means success). */
int main()
{
    int failures = 0;
    int rep = 0;
    while (rep < REPETITIONS) {
        if (!test_omp_sections_nowait()) {
            failures++;
        }
        rep++;
    }
    return failures;
}
|
middle6r.c | /*
* Date: 11 December 2015
* Contact: Thomas Peyrin - thomas.peyrin@gmail.com
*/
/*
* Simulation of boomerang analysis for Skinny
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: hsn.hadipour@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
// #define DEBUG 1
#define Nthreads 4 // Number of parallel threads utilized in this program
#define NumOfExperiments 128 // Number of independent experiments
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
0x10, 0x20};
FILE *fic;
/* Seed the C library PRNG. The seed mixes the current time with the
 * caller-supplied offset so that parallel workers get distinct streams;
 * the chosen seed is printed for reproducibility. */
void init_prng(int offset) {
    unsigned int seed = 10*time(NULL) + 11*offset;
    srand(seed);
    printf("[+] PRNG initialized to 0x%08X\n", seed);
}
/* Writes the 4x4 state to the output file `fic` as a hex string:
 * for 64-bit versions two 4-bit cells are packed per printed byte
 * (cell 2i high nibble, cell 2i+1 low nibble); for 128-bit versions
 * each cell is one byte. */
void display_matrix(unsigned char state[4][4], int ver)
{
int i;
unsigned char input[16];
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
for (i = 0; i < 8; i++)
fprintf(fic, "%02x", input[i]);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
for (i = 0; i < 16; i++)
fprintf(fic, "%02x", input[i]);
}
}
/* Writes the internal state followed by each active tweakey array
 * (TK1..TKz, where z = key size / block size) to the output file `fic`. */
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int k;
fprintf(fic, "S = ");
display_matrix(state, ver);
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
fprintf(fic, " - TK%i = ", k + 1);
display_matrix(keyCells[k], ver);
}
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state.
// The number of active tweakey arrays is key size / block size (1, 2 or 3).
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// apply the subtweakey to the internal state: XOR the top two rows of
// every active tweakey array into the top two rows of the state
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the TWEAKEY permutation
pos = TWEAKEY_P[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs (top two rows only;
// TK1 (k == 0) is not touched by any LFSR)
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
//application of LFSRs for TK updates
if (k == 1)
{
// TK2 LFSR (4-bit or 8-bit variant depending on cell size)
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
else if (k == 2)
{
// TK3 LFSR (runs in the opposite direction of the TK2 LFSR)
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
}
}
}
// commit the updated tweakey schedule back into keyCells
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
}
// Inverse of AddKey: first steps the tweakey schedule backwards (inverse
// permutation, then inverse LFSRs on the rows that were in the top half
// before the forward permutation, i.e. rows 2-3 now), then XORs the
// resulting subtweakey into the top two rows of the state.
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the inverse TWEAKEY permutation
pos = TWEAKEY_P_inv[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs (inverse direction;
// TK1 (k == 0) is never touched by an LFSR)
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 2; i <= 3; i++)
{
for (j = 0; j < 4; j++)
{
//application of inverse LFSRs for TK updates
if (k == 1)
{
// inverse of the TK2 LFSR
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
else if (k == 2)
{
// inverse of the TK3 LFSR
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
}
}
}
// commit the rewound tweakey schedule back into keyCells
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
// apply the subtweakey to the internal state (XOR is its own inverse)
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
}
// Apply the round constants of round r: the 6-bit LFSR output RC[r] is
// split over the first cells of rows 0 and 1, plus a fixed 0x2 in row 2.
// XOR-only, so this function is its own inverse.
void AddConstants(unsigned char state[4][4], int r)
{
state[0][0] ^= (RC[r] & 0xf);        // low 4 bits of the constant
state[1][0] ^= ((RC[r] >> 4) & 0x3); // high 2 bits of the constant
state[2][0] ^= 0x2;                  // fixed constant
}
// Substitute every cell of the state through the 4-bit Sbox.
void SubCell4(unsigned char state[4][4])
{
    int row, col;
    for (row = 0; row < 4; row++) {
        unsigned char *cells = state[row];
        for (col = 0; col < 4; col++)
            cells[col] = sbox_4[cells[col]];
    }
}
// Substitute every cell of the state through the inverse 4-bit Sbox.
void SubCell4_inv(unsigned char state[4][4])
{
    int row, col;
    for (row = 0; row < 4; row++) {
        unsigned char *cells = state[row];
        for (col = 0; col < 4; col++)
            cells[col] = sbox_4_inv[cells[col]];
    }
}
// Substitute every cell of the state through the 8-bit Sbox.
void SubCell8(unsigned char state[4][4])
{
    int row, col;
    for (row = 0; row < 4; row++) {
        unsigned char *cells = state[row];
        for (col = 0; col < 4; col++)
            cells[col] = sbox_8[cells[col]];
    }
}
// Substitute every cell of the state through the inverse 8-bit Sbox.
void SubCell8_inv(unsigned char state[4][4])
{
    int row, col;
    for (row = 0; row < 4; row++) {
        unsigned char *cells = state[row];
        for (col = 0; col < 4; col++)
            cells[col] = sbox_8_inv[cells[col]];
    }
}
// Apply the ShiftRows permutation: destination cell n takes its value
// from source cell P[n] (cells numbered row-major, 0..15).
void ShiftRows(unsigned char state[4][4])
{
    unsigned char permuted[4][4];
    int n;
    for (n = 0; n < 16; n++) {
        int src = P[n];
        permuted[n >> 2][n & 0x3] = state[src >> 2][src & 0x3];
    }
    memcpy(state, permuted, sizeof permuted);
}
// Undo the ShiftRows permutation: destination cell n takes its value
// from source cell P_inv[n] (cells numbered row-major, 0..15).
void ShiftRows_inv(unsigned char state[4][4])
{
    unsigned char permuted[4][4];
    int n;
    for (n = 0; n < 16; n++) {
        int src = P_inv[n];
        permuted[n >> 2][n & 0x3] = state[src >> 2][src & 0x3];
    }
    memcpy(state, permuted, sizeof permuted);
}
// Apply the linear diffusion matrix to each column:
// M =
// 1 0 1 1
// 1 0 0 0
// 0 1 1 0
// 1 0 1 0
// Written as the closed form of M applied to column (a, b, c, d).
void MixColumn(unsigned char state[4][4])
{
    int col;
    for (col = 0; col < 4; col++) {
        unsigned char a = state[0][col];
        unsigned char b = state[1][col];
        unsigned char c = state[2][col];
        unsigned char d = state[3][col];
        state[0][col] = a ^ c ^ d;
        state[1][col] = a;
        state[2][col] = b ^ c;
        state[3][col] = a ^ c;
    }
}
// Apply the inverse of the linear diffusion matrix to each column,
// written as the closed form of M^-1 applied to column (a, b, c, d).
void MixColumn_inv(unsigned char state[4][4])
{
    int col;
    for (col = 0; col < 4; col++) {
        unsigned char a = state[0][col];
        unsigned char b = state[1][col];
        unsigned char c = state[2][col];
        unsigned char d = state[3][col];
        state[0][col] = b;
        state[1][col] = b ^ c ^ d;
        state[2][col] = b ^ d;
        state[3][col] = a ^ d;
    }
}
// decryption function of Skinny: decrypts `input` (in place) over r rounds
// using `userkey`; `ver` selects the block/key size from `versions`.
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char dummy[4][4] = {{0}};
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
// unpack the input block and the (up to 3) tweakey arrays into 4x4 cell
// matrices; 64-bit versions store two 4-bit cells per input byte
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
// fast-forward the tweakey schedule to its state after r rounds: AddKey
// also steps the schedule, and the `dummy` cipher state is discarded
for (i = r - 1; i >= 0; i--)
{
AddKey(dummy, keyCells, ver);
}
#ifdef DEBUG
fprintf(fic, "DEC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// undo the rounds in reverse order (inverse operations, inverse order;
// AddConstants is XOR-only and therefore its own inverse)
for (i = r - 1; i >= 0; i--)
{
MixColumn_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
SubCell4_inv(state);
else
SubCell8_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
}
#ifdef DEBUG
fprintf(fic, "DEC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// repack the cell matrix into the caller's byte buffer
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// encryption function of Skinny: encrypts `input` (in place) over r rounds
// using `userkey`; `ver` selects the block/key size from `versions`.
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
// unpack the input block and the (up to 3) tweakey arrays into 4x4 cell
// matrices; 64-bit versions store two 4-bit cells per input byte
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
#ifdef DEBUG
fprintf(fic, "ENC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// round function: SubCell, AddConstants, AddKey (which also steps the
// tweakey schedule), ShiftRows, MixColumn
for (i = 0; i < r; i++)
{
if (versions[ver][0] == 64)
SubCell4(state);
else
SubCell8(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
MixColumn(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
} //The last subtweakey should not be added
#ifdef DEBUG
fprintf(fic, "ENC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// repack the cell matrix into the caller's byte buffer
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// Generate test vectors for one version of Skinny.
// Runs 9 random encrypt/decrypt round trips: the tweakey (TK), plaintext (P),
// ciphertext (C) and re-decrypted plaintext (P') are written to the global
// file handle `fic`; the raw plaintext is also echoed to stdout.
// NOTE(review): P' is printed but never compared to P programmatically.
void TestVectors(int ver)
{
unsigned char p[16];
unsigned char c[16];
unsigned char k[48];
int n;
for (n = 1; n < 10; n++) // 9 independent test vectors
{
int i;
// versions[ver][0] is the block size in bits, so >> 3 gives bytes.
// c starts as a copy of p and is encrypted/decrypted in place below.
for (i = 0; i < (versions[ver][0] >> 3); i++)
c[i] = p[i] = rand() & 0xff;
for (i = 0; i < (versions[ver][0] >> 3); i++)
printf("%02x", p[i]);
printf("\n");
// versions[ver][1] is the tweakey size in bits.
for (i = 0; i < (versions[ver][1] >> 3); i++)
k[i] = rand() & 0xff;
fprintf(fic, "TK = ");
for (i = 0; i < (versions[ver][1] >> 3); i++)
fprintf(fic, "%02x", k[i]);
fprintf(fic, "\n");
fprintf(fic, "P = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", p[i]);
fprintf(fic, "\n");
enc(c, k, ver, 10); // encrypt in place, 10 rounds
fprintf(fic, "C = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n");
dec(c, k, ver, 10); // decrypt back in place
fprintf(fic, "P' = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n\n");
}
}
// Boomerang distinguisher for Skinny: generates N3 quartets under four
// related keys and counts how many return with the plaintext difference dp.
// r        : number of rounds for each encryption/decryption
// ver      : Skinny version index (selects block/tweakey sizes via `versions`)
// N3       : number of quartets to try
// dp, dc   : plaintext and ciphertext differences
// dk1, dk2 : key differences for the upper and lower trails
// Returns the number of returning quartets.
int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    const int nstate = versions[ver][0] >> 3; // block size in bytes
    const int nkey = versions[ver][1] >> 3;   // tweakey size in bytes
    unsigned char pa[16], pb[16];
    unsigned char ca[16], cb[16];
    unsigned char ka[48], kb[48], kc[48], kd[48];
    int i;
    // k1 random; k2 = k1 ^ dk1, k3 = k1 ^ dk2, k4 = k2 ^ dk2.
    for (i = 0; i < nkey; i++)
    {
        ka[i] = rand() & 0xff;
        kb[i] = ka[i] ^ dk1[i];
        kc[i] = ka[i] ^ dk2[i];
        kd[i] = kb[i] ^ dk2[i];
    }
    int hits = 0;
    for (int trial = 0; trial < N3; trial++)
    {
        // Random plaintext pair with difference dp.
        for (i = 0; i < nstate; i++)
        {
            pa[i] = rand() & 0xff;
            pb[i] = pa[i] ^ dp[i];
        }
        enc(pa, ka, ver, r);
        enc(pb, kb, ver, r);
        // Shift both ciphertexts by dc, then decrypt under the related keys.
        for (i = 0; i < nstate; i++)
        {
            ca[i] = pa[i] ^ dc[i];
            cb[i] = pb[i] ^ dc[i];
        }
        dec(ca, kc, ver, r);
        dec(cb, kd, ver, r);
        // The quartet returns iff the decrypted pair again differs by dp.
        int match = 1;
        for (i = 0; i < nstate && match; i++)
            match = ((ca[i] ^ cb[i]) == dp[i]);
        hits += match;
    }
    return hits;
}
// Run boomerang experiments across N1 OpenMP threads and report the observed
// probability (as a power of two) together with timing information.
// R          : number of rounds
// ver        : Skinny version index
// N1, N2, N3 : threads, bunches per thread, queries per bunch
// dp, dc, dk1, dk2 : differences forwarded to boomerang()
// Returns the total number of returning quartets over all N1*N2*N3 queries.
double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Parallel execution
    int NUM[N1];
    int counter;
    // Bug fix: compute the total query count in double.  The original code
    // multiplied N1 * N2 * N3 in int first, which overflows for typical
    // parameters (N2 = 2^22 bunches across many threads).
    double total = (double)N1 * N2 * N3;
    printf("#Rounds: %d rounds\n", R);
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log(total) / log(2));
    clock_t clock_timer;
    double wall_timer;
    clock_timer = clock();
    wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
    #pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        // NOTE(review): NUM is indexed by thread id, assuming the runtime
        // grants exactly N1 threads with ids 0..N1-1 — confirm for the
        // target OpenMP runtime.
        int ID = omp_get_thread_num();
        init_prng(ID); // per-thread PRNG seed for independent streams
        for (int j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
        }
        NUM[ID] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    double sum = 0;
    double sum_temp = 1;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
    // Inverse empirical probability; uses the overflow-safe total from above.
    sum_temp = total / sum;
    printf("2^(-%f)\n\n", log(sum_temp) / log(2));
    printf("##########################\n");
    return sum;
}
// Parse a hex string into a state-sized byte array (one byte per two hex
// characters).  hex_str must hold at least 2 * (block bytes) characters.
// ver : Skinny version index (versions[ver][0] is the block size in bits)
// dx  : output array, filled with (block bits / 8) bytes
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        // Bug fix: strtol requires a NUL-terminated string; the original
        // 2-byte buffer had no terminator, so strtol read past its end
        // (undefined behavior).
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Parse a hex string into a tweakey-sized byte array (one byte per two hex
// characters).  hex_str must hold at least 2 * (tweakey bytes) characters.
// ver : Skinny version index (versions[ver][1] is the tweakey size in bits)
// dt  : output array, filled with (tweakey bits / 8) bytes
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        // Bug fix: strtol requires a NUL-terminated string; the original
        // 2-byte buffer had no terminator, so strtol read past its end
        // (undefined behavior).
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Driver: configure a boomerang distinguisher for one Skinny version, run
// NumOfExperiments independent experiments and print the average probability.
int main()
{
    // srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand();
    // init_prng(1);
    // //test all versions of Skinny
    // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
    // {
    // sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
    // fic = fopen(name, "w");
    // fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
    // TestVectors(i);
    // fclose(fic);
    // printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
    // }
    unsigned char dp[16];
    unsigned char dc[16];
    unsigned char dk1[48];
    unsigned char dk2[48];
    // #######################################################################################################
    // #######################################################################################################
    // ############################## User must change only the following lines ##############################
    int R = 6;   // Number of rounds
    int ver = 5; // Determine the version:
    // [0 = Skinny-64-64]
    // [1 = Skinny-64-128]
    // [2 = Skinny-64-192]
    // [3 = Skinny-128-128]
    // [4 = Skinny-128-256]
    // [5 = Skinny-128-384]
    char dp_str[] = "00000000000000000000004000000000";
    char dc_str[] = "00000000000000000000000000000000";
    char dk1_str[] = "00000000000000000000000000002a00000000000000000000000000000099000000000000000000000000000000f300";
    char dk2_str[] = "000000000000000000000054000000000000000000000000000000f30000000000000000000000000000007f00000000";
    // #######################################################################################################
    // #######################################################################################################
    convert_hexstr_to_statearray(ver, dp_str, dp);
    convert_hexstr_to_statearray(ver, dc_str, dc);
    convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
    convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
    //########################## Number of queries #########################
    int N1 = Nthreads; // Number of parallel threads : N1
    int deg1 = 22;
    int deg2 = 0;
    int N2 = 1 << deg1; // Number of bunches per thread : N2 = 2^(deg)
    int N3 = 1 << deg2; // Number of queries per bunch : N3
    //################### Number of total queries : N1*N2*N3 ###############
    char all_results[NumOfExperiments][20];
    double sum = 0;
    double sum_temp = 0;
    // Bug fix: compute the per-experiment query count in double; the original
    // (double)(N1 * N2 * N3) multiplied in int first and could overflow.
    double total = (double)N1 * N2 * N3;
    for (int i = 0; i < NumOfExperiments; i++)
    {
        printf("Experiment Number %d:\n", i);
        sum_temp = send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
        sum += sum_temp;
        sum_temp = total / sum_temp;
        // Bounded write: the 20-byte slot cannot overflow even for extreme
        // exponents (sprintf in the original had no bound).
        snprintf(all_results[i], sizeof all_results[i], "2^(-%0.2f), ", log(sum_temp) / log(2));
    }
    printf("A summary of all results:\n");
    for (int i = 0; i < NumOfExperiments; i++)
    {
        printf("%s", all_results[i]);
    }
    printf("\n##########################\nAverage = 2^(-%0.4f)\n",
           (log(NumOfExperiments) + log(N1) + log(N2) + log(N3) - log(sum)) / log(2));
    return 0;
}
|
ztrmm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_trmm
*
* Performs a triangular matrix-matrix multiply of the form
*
* \f[B = \alpha [op(A) \times B] \f], if side = PlasmaLeft or
* \f[B = \alpha [B \times op(A)] \f], if side = PlasmaRight
*
* where op( X ) is one of:
*
* - op(A) = A or
* - op(A) = A^T or
* - op(A) = A^H
*
* alpha is a scalar, B is an m-by-n matrix and A is a unit or non-unit, upper
* or lower triangular matrix.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
* - PlasmaNoTrans: A is not transposed;
* - PlasmaTrans: A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] m
* The number of rows of matrix B.
* m >= 0.
*
* @param[in] n
* The number of columns of matrix B.
* n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* The triangular matrix A of dimension lda-by-k, where k is m when
* side='L' or 'l' and k is n when when side='R' or 'r'. If uplo =
* PlasmaUpper, the leading k-by-k upper triangular part of the array
* A contains the upper triangular matrix, and the strictly lower
* triangular part of A is not referenced. If uplo = PlasmaLower, the
* leading k-by-k lower triangular part of the array A contains the
* lower triangular matrix, and the strictly upper triangular part of
* A is not referenced. If diag = PlasmaUnit, the diagonal elements of
* A are also not referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. When side='L' or 'l',
* lda >= max(1,m), when side='R' or 'r' then lda >= max(1,n).
*
* @param[in,out] pB
* On entry, the matrix B of dimension ldb-by-n.
* On exit, the result of a triangular matrix-matrix multiply
* ( alpha*op(A)*B ) or ( alpha*B*op(A) ).
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_ztrmm
* @sa plasma_ctrmm
* @sa plasma_dtrmm
* @sa plasma_strmm
*
******************************************************************************/
// Triangular matrix-matrix multiply (LAPACK layout entry point).
// Validates arguments, converts pA/pB to tile layout, runs the async tile
// algorithm, and converts the result back.  Returns PlasmaSuccess or a
// negative value identifying the offending argument.
int plasma_ztrmm(plasma_enum_t side, plasma_enum_t uplo,
                 plasma_enum_t transa, plasma_enum_t diag,
                 int m, int n,
                 plasma_complex64_t alpha, plasma_complex64_t *pA, int lda,
                 plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments (negative return = 1-based argument position).
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        return -1;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if (transa != PlasmaConjTrans &&
        transa != PlasmaNoTrans &&
        transa != PlasmaTrans )
    {
        plasma_error("illegal value of transa");
        return -3;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        return -4;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -5;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -6;
    }
    // A is na-by-na: its order is m when applied from the left, n from the right.
    int na;
    if (side == PlasmaLeft)
        na = m;
    else
        na = n;
    if (lda < imax(1, na)) {
        plasma_error("illegal value of lda");
        return -8;
    }
    if (ldb < imax(1, m)) {
        plasma_error("illegal value of ldb");
        return -10;
    }
    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;
    // Set tiling parameters.
    int nb = plasma->nb;
    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        na, na, 0, 0, na, na, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Bug fix: the original code leaked the tile matrices A and B on
        // this error path; release them before returning.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }
    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate matrices to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, sequence, &request);
        // Call tile async interface.
        plasma_omp_ztrmm(side, uplo, transa, diag,
                         alpha, A,
                         B,
                         sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request);
    }
    // implicit synchronization
    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_trmm
*
* Performs triangular matrix multiplication. Non-blocking tile version of
* plasma_ztrmm(). May return before the computation is finished. Operates on
* matrices stored by tiles. All matrices are passed through descriptors. All
* dimensions are taken from the descriptors. Allows for pipelining of
* operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
* - PlasmaNoTrans: A is not transposed;
* - PlasmaTrans: A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of the triangular matrix A.
*
* @param[in,out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_ztrmm
* @sa plasma_omp_ctrmm
* @sa plasma_omp_dtrmm
* @sa plasma_omp_strmm
*
******************************************************************************/
// Asynchronous tile version of ztrmm.  Errors are reported by setting
// sequence->status and request->status via plasma_request_fail().
void plasma_omp_ztrmm(plasma_enum_t side, plasma_enum_t uplo,
                      plasma_enum_t transa, plasma_enum_t diag,
                      plasma_complex64_t alpha, plasma_desc_t A,
                      plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Bug fix: validate sequence and request before anything else.  The
    // original code checked them only after several error paths had already
    // passed a possibly-NULL sequence/request to plasma_request_fail(),
    // which writes through those pointers.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        // plasma_request_fail() cannot be used here: it would write through
        // the NULL request pointer.
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
        return;
    }
    // Check input arguments.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (transa != PlasmaConjTrans &&
        transa != PlasmaNoTrans &&
        transa != PlasmaTrans) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    // NOTE(review): returning unchanged B for alpha == 0.0 differs from BLAS
    // trmm semantics (which scale B to zero) — confirm this is intended.
    if (A.m == 0 || A.n == 0 || alpha == 0.0 || B.m == 0 || B.n == 0)
        return;
    // Call parallel function.
    plasma_pztrmm(side, uplo, transa, diag, alpha,
                  A, B,
                  sequence, request);
}
|
reduction-7.c | char z[10] = { 0 };
/* Exercise OpenMP array-section reductions (+, *, |, &, max) over pointer,
   array-parameter and file-scope array operands; aborts if the |= or max
   reductions produce wrong results.  noinline/noclone keeps the reduction
   clauses from being optimized away by IPA transformations.  */
__attribute__((noinline, noclone)) void
foo (int (*x)[3][2], int *y, long w[1][2])
{
unsigned long long a[9] = {};
short b[5] = {};
int i;
/* Sections: x[0:2][:][0:2] and z[:4] use +, y[:3] uses *, a[:4] uses |,
   w[0:1][:2] uses &, and the whole of b uses max.  */
#pragma omp parallel for reduction(+:x[0:2][:][0:2], z[:4]) \
reduction(*:y[:3]) reduction(|:a[:4]) \
reduction(&:w[0:1][:2]) reduction(max:b)
for (i = 0; i < 128; i++)
{
x[i / 64][i % 3][(i / 4) & 1] += i;
if ((i & 15) == 1)
y[0] *= 3;
if ((i & 31) == 2)
y[1] *= 7;
if ((i & 63) == 3)
y[2] *= 17;
z[i / 32] += (i & 3);
if (i < 4)
z[i] += i;
/* i & 30 yields only even bit positions, so a[0..3] become 0x55555555.  */
a[i / 32] |= 1ULL << (i & 30);
w[0][i & 1] &= ~(1L << (i / 17 * 3));
if ((i % 79) > b[0])
b[0] = i % 79;
if ((i % 13) > b[1])
b[1] = i % 13;
if ((i % 23) > b[2])
b[2] = i % 23;
if ((i % 85) > b[3])
b[3] = i % 85;
if ((i % 192) > b[4])
b[4] = i % 192;
}
/* Verify the | reduction: only a[0..3] are in the section, the rest stay 0.  */
for (i = 0; i < 9; i++)
if (a[i] != (i < 4 ? 0x55555555ULL : 0))
__builtin_abort ();
/* Verify the max reduction (b[4] sees i % 192 == i, so max is 127).  */
if (b[0] != 78 || b[1] != 12 || b[2] != 22 || b[3] != 84 || b[4] != 127)
__builtin_abort ();
}
/* Drive foo() with offset base pointers (&a[1], y + 1) so the array sections
   start mid-object, then compare every reduced object against precomputed
   expected values.  */
int
main ()
{
int a[4][3][2] = {};
static int a2[4][3][2] = {{{ 0, 0 }, { 0, 0 }, { 0, 0 }},
{{ 312, 381 }, { 295, 356 }, { 337, 335 }},
{{ 1041, 975 }, { 1016, 1085 }, { 935, 1060 }},
{{ 0, 0 }, { 0, 0 }, { 0, 0 }}};
int y[5] = { 0, 1, 1, 1, 0 };
int y2[5] = { 0, 6561, 2401, 289, 0 };
char z2[10] = { 48, 49, 50, 51, 0, 0, 0, 0, 0, 0 };
long w[1][2] = { ~0L, ~0L };
foo (&a[1], y + 1, w);
if (__builtin_memcmp (a, a2, sizeof (a))
|| __builtin_memcmp (y, y2, sizeof (y))
|| __builtin_memcmp (z, z2, sizeof (z))
|| w[0][0] != ~0x249249L
|| w[0][1] != ~0x249249L)
__builtin_abort ();
return 0;
}
|
convolution_4x4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// conv4x4s4_neon: 4x4 convolution with stride 4 (non-overlapping 4x4 patches).
// For each output channel p the output is initialized to bias[p]; then for
// every input channel q the 4x4 kernel is applied to the corresponding 4x4
// input patch and accumulated.  The inner loop is hand-written NEON assembly
// (separate aarch64 and armv7 variants) producing 4 output pixels per
// iteration, with a scalar/NEON tail for the remaining pixels.
static void conv4x4s4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After a row of outw patches (4*outw input pixels) the row pointers must
// skip the rest of the current input row plus the next 3 rows to reach the
// next patch row.
const int tailstep = w - 4 * outw + w * 3;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
// 16 kernel weights per (output channel, input channel) pair.
const float* kernel0 = kernel + p * inch * 16 + q * 16;
// Four consecutive input rows of the current patch row.
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
#if __ARM_NEON
// Preload the 4 kernel rows into NEON registers once per channel pair.
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0 + 4);
float32x4_t _k891011 = vld1q_f32(kernel0 + 8);
float32x4_t _k12131415 = vld1q_f32(kernel0 + 12);
#else
const float* k0 = kernel0;
const float* k1 = kernel0 + 4;
const float* k2 = kernel0 + 8;
const float* k3 = kernel0 + 12;
#endif // __ARM_NEON
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
// nn iterations of 4 output pixels each; `remain` handled below.
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// aarch64 path: each iteration computes 4 outputs; each output is the
// horizontal sum of 4 row-wise multiply-accumulates (faddp reduces).
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v7.4s}, [%1] \n" // v7 = outptr
"ld1 {v8.4s}, [%2], #16 \n" // v8 = r0
"ld1 {v9.4s}, [%3], #16 \n" // v9 = r1
"prfm pldl1keep, [%4, #512] \n"
"prfm pldl1keep, [%5, #512] \n"
"fmul v12.4s, v8.4s, %12.4s \n"
"fmul v13.4s, v9.4s, %13.4s \n"
"ld1 {v10.4s}, [%4], #16 \n" // v10 = r2
"ld1 {v11.4s}, [%5], #16 \n" // v11 = r3
"fmla v12.4s, v10.4s, %14.4s \n"
"fmla v13.4s, v11.4s, %15.4s \n"
"fadd v5.4s, v12.4s, v13.4s \n"
"ld1 {v8.4s}, [%2], #16 \n" // v8 = r0
"ld1 {v9.4s}, [%3], #16 \n" // v9 = r1
"fmul v12.4s, v8.4s, %12.4s \n"
"fmul v13.4s, v9.4s, %13.4s \n"
"ld1 {v10.4s}, [%4], #16 \n" // v10 = r2
"ld1 {v11.4s}, [%5], #16 \n" // v11 = r3
"fmla v12.4s, v10.4s, %14.4s \n"
"fmla v13.4s, v11.4s, %15.4s \n"
"fadd v6.4s, v12.4s, v13.4s \n"
"ld1 {v8.4s}, [%2], #16 \n" // v8 = r0
"ld1 {v9.4s}, [%3], #16 \n" // v9 = r1
"fmul v12.4s, v8.4s, %12.4s \n"
"fmul v13.4s, v9.4s, %13.4s \n"
"ld1 {v10.4s}, [%4], #16 \n" // v10 = r2
"ld1 {v11.4s}, [%5], #16 \n" // v11 = r3
"fmla v12.4s, v10.4s, %14.4s \n"
"fmla v13.4s, v11.4s, %15.4s \n"
"fadd v14.4s, v12.4s, v13.4s \n"
"faddp v5.4s, v5.4s, v6.4s \n" // Move to here to enhance ILP
"ld1 {v8.4s}, [%2], #16 \n" // v8 = r0
"ld1 {v9.4s}, [%3], #16 \n" // v9 = r1
"fmul v12.4s, v8.4s, %12.4s \n"
"fmul v13.4s, v9.4s, %13.4s \n"
"ld1 {v10.4s}, [%4], #16 \n" // v10 = r2
"ld1 {v11.4s}, [%5], #16 \n" // v11 = r3
"fmla v12.4s, v10.4s, %14.4s \n"
"fmla v13.4s, v11.4s, %15.4s \n"
"fadd v15.4s, v12.4s, v13.4s \n"
// "faddp v5.4s , v5.4s, v6.4s \n" // Move this line upward.
"faddp v14.4s, v14.4s, v15.4s \n"
"faddp v5.4s , v5.4s, v14.4s \n"
"fadd v7.4s, v7.4s, v5.4s \n"
"st1 {v7.4s}, [%1], #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415) // %15
: "cc", "memory", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
// armv7 path: same computation as aarch64, using q registers and
// vadd/vpadd pairs in place of faddp for the horizontal reductions.
if (nn > 0)
{
asm volatile(
"pld [%1, #128] \n"
"0: \n"
"pld [%2, #512] \n"
"pld [%3, #512] \n"
"vld1.f32 {d14-d15}, [%1] \n" // q7 = outptr
"vld1.f32 {d16-d17}, [%2]! \n" // q8 = r0
"vld1.f32 {d18-d19}, [%3]! \n" // q9 = r1
"pld [%4, #512] \n"
"pld [%5, #512] \n"
"vmul.f32 q12, q8, %q12 \n"
"vmul.f32 q13, q9, %q13 \n"
"vld1.f32 {d20-d21}, [%4]! \n" // q10 = r2
"vld1.f32 {d22-d23}, [%5]! \n" // q11 = r3
"vmla.f32 q12, q10, %q14 \n"
"vmla.f32 q13, q11, %q15 \n"
"vadd.f32 q5, q12, q13 \n"
"vld1.f32 {d16-d17}, [%2]! \n" // q8 = r0
"vld1.f32 {d18-d19}, [%3]! \n" // q9 = r1
"vmul.f32 q12, q8, %q12 \n"
"vmul.f32 q13, q9, %q13 \n"
"vld1.f32 {d20-d21}, [%4]! \n" // q10 = r2
"vld1.f32 {d22-d23}, [%5]! \n" // q11 = r3
"vmla.f32 q12, q10, %q14 \n"
"vmla.f32 q13, q11, %q15 \n"
"vadd.f32 q6, q12, q13 \n"
"vld1.f32 {d16-d17}, [%2]! \n" // q8 = r0
"vld1.f32 {d18-d19}, [%3]! \n" // q9 = r1
"vmul.f32 q12, q8, %q12 \n"
"vmul.f32 q13, q9, %q13 \n"
"vld1.f32 {d20-d21}, [%4]! \n" // q10 = r2
"vld1.f32 {d22-d23}, [%5]! \n" // q11 = r3
"vmla.f32 q12, q10, %q14 \n"
"vmla.f32 q13, q11, %q15 \n"
"vadd.f32 q14, q12, q13 \n"
"vld1.f32 {d16-d17}, [%2]! \n" // q8 = r0
"vld1.f32 {d18-d19}, [%3]! \n" // q9 = r1
"vmul.f32 q12, q8, %q12 \n"
"vmul.f32 q13, q9, %q13 \n"
"vld1.f32 {d20-d21}, [%4]! \n" // q10 = r2
"vld1.f32 {d22-d23}, [%5]! \n" // q11 = r3
"vmla.f32 q12, q10, %q14 \n"
"vmla.f32 q13, q11, %q15 \n"
"vadd.f32 q15, q12, q13 \n"
"vadd.f32 d10, d10, d11 \n"
"vadd.f32 d28, d28, d29 \n"
"vadd.f32 d11, d12, d13 \n"
"vadd.f32 d29, d30, d31 \n"
"vpadd.f32 d10, d10, d11 \n"
"vpadd.f32 d11, d28, d29 \n"
"vadd.f32 q7, q7, q5 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"pld [%1, #128] \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415) // %15
: "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
// Tail: one output pixel per iteration (4x4 patch dot 4x4 kernel).
for (; remain > 0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float sum = 0.f;
asm volatile(
"ld1 {v8.4s}, [%0], #16 \n" // v8 = r0
"ld1 {v9.4s}, [%1], #16 \n" // v9 = r1
"fmul v12.4s, v8.4s, %9.4s \n"
"fmul v13.4s, v9.4s, %10.4s \n"
"ld1 {v10.4s}, [%2], #16 \n" // v10 = r2
"ld1 {v11.4s}, [%3], #16 \n" // v11 = r3
"fmla v12.4s, v10.4s, %11.4s \n"
"fmla v13.4s, v11.4s, %12.4s \n"
"fadd v5.4s, v12.4s, v13.4s \n"
"faddp v5.4s, v5.4s, v5.4s \n"
"faddp s5, v5.2s \n"
"fmov %w4, s5 \n"
: "=r"(r0), // %0
"=r"(r1), // %1
"=r"(r2), // %2
"=r"(r3), // %3
"=r"(sum) // %4
: "0"(r0),
"1"(r1),
"2"(r2),
"3"(r3),
"w"(_k0123), // %9
"w"(_k4567), // %10
"w"(_k891011), // %11
"w"(_k12131415) // %12
: "cc", "memory", "v5", "v6", "v8", "v9", "v10", "v11", "v12", "v13");
*outptr += sum;
#else
float sum = 0.f;
asm volatile(
"vld1.f32 {d16-d17}, [%0]! \n" // q8 = r0
"vld1.f32 {d18-d19}, [%1]! \n" // q9 = r1
"vmul.f32 q12, q8, %q9 \n"
"vmul.f32 q13, q9, %q10 \n"
"vld1.f32 {d20-d21}, [%2]! \n" // q10 = r2
"vld1.f32 {d22-d23}, [%3]! \n" // q11 = r3
"vmla.f32 q12, q10, %q11 \n"
"vmla.f32 q13, q11, %q12 \n"
"vadd.f32 q5, q12, q13 \n"
"vadd.f32 d10, d10, d11 \n"
"vpadd.f32 d10, d10, d10 \n"
"vmov.f32 %4, d10[0] \n"
: "=r"(r0), // %0
"=r"(r1), // %1
"=r"(r2), // %2
"=r"(r3), // %3
"=r"(sum) // %4
: "0"(r0),
"1"(r1),
"2"(r2),
"3"(r3),
"w"(_k0123), // %9
"w"(_k4567), // %10
"w"(_k891011), // %11
"w"(_k12131415) // %12
: "cc", "memory", "q5", "q6", "q8", "q9", "q10", "q11", "q12", "q13");
*outptr += sum;
#endif // __aarch64__
#else
// Portable scalar fallback: explicit 16-term dot product.
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
*outptr += sum;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
#endif // __ARM_NEON
outptr++;
}
// Advance the four row pointers to the next row of 4x4 patches.
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
}
}
}
}
|
GB_unaryop__identity_uint8_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_bool
// op(A') function: GB_tran__identity_uint8_bool
// C type: uint8_t
// A type: bool
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with typecast: Cx [p] = (uint8_t) Ax [p],
// for all anz entries, using up to nthreads OpenMP threads.
GrB_Info GB_unop__identity_uint8_bool
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // direct cast-and-assign; the expansion of GB_CAST_OP (p, p)
        Cx [p] = (uint8_t) Ax [p] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose, typecast, and apply the identity operator: C = (uint8_t) A'.
// The actual loop lives in the shared template GB_unaryop_transpose.c, which
// is specialized by the GB_* macros defined earlier in this file.
GrB_Info GB_tran__identity_uint8_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int32_uint32
// op(A') function: GB_unop_tran__identity_int32_uint32
// C type: int32_t
// A type: uint32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
// type of the entries of the input matrix A
#define GB_ATYPE \
    uint32_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// access an entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting (uint32_t input to int32_t output)
#define GB_CAST(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (aij): full read-cast-apply-write for one entry
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = (int32_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator to every entry of Ax,
// casting each uint32_t value to int32_t.  Entries are independent, so the
// loop is parallelized statically across nthreads OpenMP threads.
GrB_Info GB_unop_apply__identity_int32_uint32
(
    int32_t *Cx,            // output array; may alias Ax
    const uint32_t *Ax,     // input array
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // identity op: just cast the entry from uint32_t to int32_t
        Cx [k] = (int32_t) Ax [k] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose a uint32 matrix into an int32 matrix,
// applying the identity operator entry-wise.  The worker loop is supplied by
// the GB_unop_transpose.c template, specialized by the GB_* macros above.
GrB_Info GB_unop_tran__identity_int32_uint32
(
    GrB_Matrix C,                       // output matrix, C = op (cast (A'))
    const GrB_Matrix A,                 // input matrix (transposed on the fly)
    int64_t *GB_RESTRICT *Rowcounts,    // Rowcounts [taskid] workspace
    GBI_single_iterator Iter,           // iterator over the vectors of A
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across tasks
    int naslice                         // number of slices (tasks)
)
{
    // if this operator/type combination was compiled out, tell the caller to
    // fall back to the generic (non-specialized) kernel
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the transpose template does the actual numeric work
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_7x7.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// 7x7 convolution, stride 1, no padding handled here (top_blob is assumed to
// be sized to the valid output region).  Output channels are computed in
// parallel; for each output channel the 7x7 kernels of all input channels are
// accumulated into the output.  Three inner-loop implementations exist:
//   - aarch64 + clang: hand-written inline asm (4 outputs per iteration)
//   - aarch64 + gcc:   NEON intrinsics (gcc rejects asm with >30 operands)
//   - 32-bit ARM:      inline asm
// with a scalar tail loop handling the remaining (outw % 4) columns.
static void conv7x7s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // initialize the whole output channel with its bias (0 if no bias)
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            // 49 weights per (output channel, input channel) pair
            const float* kernel0 = kernel + p*inch*49 + q*49;

            // seven consecutive input rows feeding one output row
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            const float* r5 = img0 + w*5;
            const float* r6 = img0 + w*6;

            // the seven kernel rows
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 7;
            const float* k2 = kernel0 + 14;
            const float* k3 = kernel0 + 21;
            const float* k4 = kernel0 + 28;
            const float* k5 = kernel0 + 35;
            const float* k6 = kernel0 + 42;

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                // nn iterations of 4 outputs each, then a scalar remainder
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                // preload the 7x7 kernel into 14 vector registers; note the
                // pairs overlap by one lane (e.g. _k4567 holds k[4..7] where
                // k[7] is unused for row 0), only lanes [0..2] of each second
                // vector are consumed per row
                float32x4_t _k0123 = vld1q_f32(k0);
                float32x4_t _k4567 = vld1q_f32(k0 + 4);
                float32x4_t _k78910 = vld1q_f32(k1);
                float32x4_t _k11121314 = vld1q_f32(k1 + 4);
                float32x4_t _k14151617 = vld1q_f32(k2);
                float32x4_t _k18192021 = vld1q_f32(k2 + 4);
                float32x4_t _k21222324 = vld1q_f32(k3);
                float32x4_t _k25262728 = vld1q_f32(k3 + 4);
                float32x4_t _k28293031 = vld1q_f32(k4);
                float32x4_t _k32333435 = vld1q_f32(k4 + 4);
                float32x4_t _k35363738 = vld1q_f32(k5);
                float32x4_t _k39404142 = vld1q_f32(k5 + 4);
                float32x4_t _k42434445 = vld1q_f32(k6);
                float32x4_t _k46474849 = vld1q_f32(k6 + 4);
#ifdef __clang__ // __ARM_NEON && __aarch64__ && __clang__
                if (nn > 0)
                {
                    asm volatile(
                        // v0: input / final output
                        // v1 v2 v3: = ri0 ri4 ri0n , i <- 1-7
                        // v4 = ri1 / ri3 / ri6
                        // v5 = ri2 / ri5
                        // v9 = intermediate sum register
                        "0: \n"

                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.4s}, [%1] \n"

                        //i = 1
                        "prfm pldl1keep, [%2, #384] \n"
                        "ld1 {v1.4s, v2.4s, v3.4s}, [%2] \n"
                        "add %2, %2, #16 \n"

                        "ext v4.16b, v1.16b, v2.16b, #4 \n"
                        "fmul v9.4s, v1.4s, %18.s[0] \n"
                        "ext v5.16b, v1.16b, v2.16b, #8 \n"
                        "fmla v0.4s, v4.4s, %18.s[1] \n"
                        "ext v4.16b, v1.16b, v2.16b, #12 \n"
                        "fmla v9.4s, v5.4s, %18.s[2] \n"
                        "ext v5.16b, v2.16b, v3.16b, #4 \n"
                        "fmla v0.4s, v4.4s, %18.s[3] \n"
                        "ext v4.16b, v2.16b, v3.16b, #8 \n"
                        "fmla v9.4s, v2.4s, %19.s[0] \n"
                        "fmla v0.4s, v5.4s, %19.s[1] \n"
                        "fmla v9.4s, v4.4s, %19.s[2] \n"

                        //i = 2
                        "prfm pldl1keep, [%3, #384] \n"
                        "ld1 {v1.4s, v2.4s, v3.4s}, [%3] \n" // v1 v2 v3: = r20 r24 r20n
                        "add %3, %3, #16 \n"

                        "ext v4.16b, v1.16b, v2.16b, #4 \n" // v4 = r21
                        "fmla v9.4s, v1.4s, %20.s[0] \n" // *+ r10
                        "ext v5.16b, v1.16b, v2.16b, #8 \n" // v5 = r22
                        "fmla v0.4s, v4.4s, %20.s[1] \n" // *+ r11
                        "ext v4.16b, v1.16b, v2.16b, #12 \n" // v4 = r23
                        "fmla v9.4s, v5.4s, %20.s[2] \n" // *+ r1
                        "ext v5.16b, v2.16b, v3.16b, #4 \n" // v5 = r25
                        "fmla v0.4s, v4.4s, %20.s[3] \n" // *+ r13
                        "ext v4.16b, v2.16b, v3.16b, #8 \n" // v4 = r26
                        "fmla v9.4s, v2.4s, %21.s[0] \n" // *+ r14
                        "fmla v0.4s, v5.4s, %21.s[1] \n" // *+ r15
                        "fmla v9.4s, v4.4s, %21.s[2] \n" // *+ r16

                        //i = 3
                        "prfm pldl1keep, [%4, #384] \n"
                        "ld1 {v1.4s, v2.4s, v3.4s}, [%4] \n"
                        "add %4, %4, #16 \n"

                        "ext v4.16b, v1.16b, v2.16b, #4 \n"
                        "fmla v9.4s, v1.4s, %22.s[0] \n"
                        "ext v5.16b, v1.16b, v2.16b, #8 \n"
                        "fmla v0.4s, v4.4s, %22.s[1] \n"
                        "ext v4.16b, v1.16b, v2.16b, #12 \n"
                        "fmla v9.4s, v5.4s, %22.s[2] \n"
                        "ext v5.16b, v2.16b, v3.16b, #4 \n"
                        "fmla v0.4s, v4.4s, %22.s[3] \n"
                        "ext v4.16b, v2.16b, v3.16b, #8 \n"
                        "fmla v9.4s, v2.4s, %23.s[0] \n"
                        "fmla v0.4s, v5.4s, %23.s[1] \n"
                        "fmla v9.4s, v4.4s, %23.s[2] \n"

                        //i = 4
                        "prfm pldl1keep, [%5, #384] \n"
                        "ld1 {v1.4s, v2.4s, v3.4s}, [%5] \n"
                        "add %5, %5, #16 \n"

                        "ext v4.16b, v1.16b, v2.16b, #4 \n"
                        "fmla v9.4s, v1.4s, %24.s[0] \n"
                        "ext v5.16b, v1.16b, v2.16b, #8 \n"
                        "fmla v0.4s, v4.4s, %24.s[1] \n"
                        "ext v4.16b, v1.16b, v2.16b, #12 \n"
                        "fmla v9.4s, v5.4s, %24.s[2] \n"
                        "ext v5.16b, v2.16b, v3.16b, #4 \n"
                        "fmla v0.4s, v4.4s, %24.s[3] \n"
                        "ext v4.16b, v2.16b, v3.16b, #8 \n"
                        "fmla v9.4s, v2.4s, %25.s[0] \n"
                        "fmla v0.4s, v5.4s, %25.s[1] \n"
                        "fmla v9.4s, v4.4s, %25.s[2] \n"

                        //i = 5
                        "prfm pldl1keep, [%6, #384] \n"
                        "ld1 {v1.4s, v2.4s, v3.4s}, [%6] \n"
                        "add %6, %6, #16 \n"

                        "ext v4.16b, v1.16b, v2.16b, #4 \n"
                        "fmla v9.4s, v1.4s, %26.s[0] \n"
                        "ext v5.16b, v1.16b, v2.16b, #8 \n"
                        "fmla v0.4s, v4.4s, %26.s[1] \n"
                        "ext v4.16b, v1.16b, v2.16b, #12 \n"
                        "fmla v9.4s, v5.4s, %26.s[2] \n"
                        "ext v5.16b, v2.16b, v3.16b, #4 \n"
                        "fmla v0.4s, v4.4s, %26.s[3] \n"
                        "ext v4.16b, v2.16b, v3.16b, #8 \n"
                        "fmla v9.4s, v2.4s, %27.s[0] \n"
                        "fmla v0.4s, v5.4s, %27.s[1] \n"
                        "fmla v9.4s, v4.4s, %27.s[2] \n"

                        //i = 6
                        "prfm pldl1keep, [%7, #384] \n"
                        "ld1 {v1.4s, v2.4s, v3.4s}, [%7] \n"
                        "add %7, %7, #16 \n"

                        "ext v4.16b, v1.16b, v2.16b, #4 \n"
                        "fmla v9.4s, v1.4s, %28.s[0] \n"
                        "ext v5.16b, v1.16b, v2.16b, #8 \n"
                        "fmla v0.4s, v4.4s, %28.s[1] \n"
                        "ext v4.16b, v1.16b, v2.16b, #12 \n"
                        "fmla v9.4s, v5.4s, %28.s[2] \n"
                        "ext v5.16b, v2.16b, v3.16b, #4 \n"
                        "fmla v0.4s, v4.4s, %28.s[3] \n"
                        "ext v4.16b, v2.16b, v3.16b, #8 \n"
                        "fmla v9.4s, v2.4s, %29.s[0] \n"
                        "fmla v0.4s, v5.4s, %29.s[1] \n"
                        "fmla v9.4s, v4.4s, %29.s[2] \n"

                        //i = 7
                        "prfm pldl1keep, [%8, #384] \n"
                        "ld1 {v1.4s, v2.4s, v3.4s}, [%8] \n"
                        "add %8, %8, #16 \n"

                        "ext v4.16b, v1.16b, v2.16b, #4 \n"
                        "fmla v9.4s, v1.4s, %30.s[0] \n"
                        "ext v5.16b, v1.16b, v2.16b, #8 \n"
                        "fmla v0.4s, v4.4s, %30.s[1] \n"
                        "ext v4.16b, v1.16b, v2.16b, #12 \n"
                        "fmla v9.4s, v5.4s, %30.s[2] \n"
                        "ext v5.16b, v2.16b, v3.16b, #4 \n"
                        "fmla v0.4s, v4.4s, %30.s[3] \n"
                        "ext v4.16b, v2.16b, v3.16b, #8 \n"
                        "fmla v9.4s, v2.4s, %31.s[0] \n"
                        "fmla v0.4s, v5.4s, %31.s[1] \n"
                        "fmla v9.4s, v4.4s, %31.s[2] \n"

                        // merge the two accumulators and store 4 outputs
                        "fadd v0.4s, v0.4s, v9.4s \n"

                        "st1 {v0.4s}, [%1], #16 \n"

                        "subs %w0, %w0, #1 \n"
                        "bne 0b \n"

                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3), // %5
                        "=r"(r4), // %6
                        "=r"(r5), // %7
                        "=r"(r6) // %8
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "7"(r5),
                        "8"(r6),
                        "w"(_k0123), // %18
                        "w"(_k4567), // %19
                        "w"(_k78910), // %20
                        "w"(_k11121314), // %21
                        "w"(_k14151617), // %22
                        "w"(_k18192021), // %23
                        "w"(_k21222324), // %24
                        "w"(_k25262728), // %25
                        "w"(_k28293031), // %26
                        "w"(_k32333435), // %27
                        "w"(_k35363738), // %28
                        "w"(_k39404142), // %29
                        "w"(_k42434445), // %30
                        "w"(_k46474849) // %31
                        : "cc", "memory","v0", "v1", "v2", "v3", "v4", "v5", "v9"
                    );
                }
#else // __ARM_NEON && __aarch64__ defined, but __clang__ not defined
                // When compiled with gcc, gcc does not accept over 30 operands
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vld1q_f32(outptr);

                    // row 0: build the 7 shifted views of r0[0..10] via vext
                    float32x4_t _r00 = vld1q_f32(r0);// 0 1 2 3
                    float32x4_t _r04 = vld1q_f32(r0 + 4);// 4 5 6 7
                    float32x4_t _r00n = vld1q_f32(r0 + 8);// 8 9 10 11
                    float32x4_t _r01 = vextq_f32(_r00, _r04, 1);// 1 2 3 4
                    float32x4_t _r02 = vextq_f32(_r00, _r04, 2);// 2 3 4 5
                    float32x4_t _r03 = vextq_f32(_r00, _r04, 3);// 3 4 5 6
                    float32x4_t _r05 = vextq_f32(_r04, _r00n, 1);// 5 6 7 8
                    float32x4_t _r06 = vextq_f32(_r04, _r00n, 2);// 6 7 8 9

                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);

                    // row 1
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r14 = vld1q_f32(r1 + 4);
                    float32x4_t _r10n = vld1q_f32(r1 + 8);
                    float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
                    float32x4_t _r13 = vextq_f32(_r10, _r14, 3);
                    float32x4_t _r15 = vextq_f32(_r14, _r10n, 1);
                    float32x4_t _r16 = vextq_f32(_r14, _r10n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);

                    // row 2
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r24 = vld1q_f32(r2 + 4);
                    float32x4_t _r20n = vld1q_f32(r2 + 8);
                    float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
                    float32x4_t _r23 = vextq_f32(_r20, _r24, 3);
                    float32x4_t _r25 = vextq_f32(_r24, _r20n, 1);
                    float32x4_t _r26 = vextq_f32(_r24, _r20n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);

                    // row 3
                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r34 = vld1q_f32(r3 + 4);
                    float32x4_t _r30n = vld1q_f32(r3 + 8);
                    float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
                    float32x4_t _r33 = vextq_f32(_r30, _r34, 3);
                    float32x4_t _r35 = vextq_f32(_r34, _r30n, 1);
                    float32x4_t _r36 = vextq_f32(_r34, _r30n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);

                    // row 4
                    float32x4_t _r40 = vld1q_f32(r4);
                    float32x4_t _r44 = vld1q_f32(r4 + 4);
                    float32x4_t _r40n = vld1q_f32(r4 + 8);
                    float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
                    float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
                    float32x4_t _r43 = vextq_f32(_r40, _r44, 3);
                    float32x4_t _r45 = vextq_f32(_r44, _r40n, 1);
                    float32x4_t _r46 = vextq_f32(_r44, _r40n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);

                    // row 5
                    float32x4_t _r50 = vld1q_f32(r5);
                    float32x4_t _r54 = vld1q_f32(r5 + 4);
                    float32x4_t _r50n = vld1q_f32(r5 + 8);
                    float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
                    float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
                    float32x4_t _r53 = vextq_f32(_r50, _r54, 3);
                    float32x4_t _r55 = vextq_f32(_r54, _r50n, 1);
                    float32x4_t _r56 = vextq_f32(_r54, _r50n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);

                    // row 6
                    float32x4_t _r60 = vld1q_f32(r6);
                    float32x4_t _r64 = vld1q_f32(r6 + 4);
                    float32x4_t _r60n = vld1q_f32(r6 + 8);
                    float32x4_t _r61 = vextq_f32(_r60, _r64, 1);
                    float32x4_t _r62 = vextq_f32(_r60, _r64, 2);
                    float32x4_t _r63 = vextq_f32(_r60, _r64, 3);
                    float32x4_t _r65 = vextq_f32(_r64, _r60n, 1);
                    float32x4_t _r66 = vextq_f32(_r64, _r60n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);

                    vst1q_f32(outptr, _sum);

                    // stride 1: advance 4 input columns per 4 outputs
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                    r6 += 4;
                    outptr += 4;
                }
#endif // __clang__
#else // __aarch64__ not defined: 32-bit ARM inline-asm path
                if (nn > 0)
                {
                    asm volatile(
                        "0: \n"

                        "pld [%1, #256] \n"
                        "vld1.f32 {d24-d25}, [%1] \n"// _sum
                        // "veor q13, q13 \n"// _sum2 = 0;
                        // "veor q14, q14 \n"// _sum3 = 0;
                        // "veor q15, q15 \n"// _sum4 = 0;

                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
                        "add %9, #28 \n"

                        "pld [%2, #128] \n"
                        "vld1.f32 {d0-d1}, [%2]! \n"// q0 = 0 1 2 3
                        "vmla.f32 q12, q0, d8[0] \n"

                        "pld [%2, #256] \n"
                        "vld1.f32 {d4-d7}, [%2] \n"// q2 = 4 5 6 7 q3 = 8 9 10 11
                        "vmul.f32 q13, q2, d10[0] \n"

                        "vext.32 q1, q0, q2, #1 \n"// q1 = 1 2 3 4
                        "vext.32 q10, q2, q3, #1 \n"// q10= 5 6 7 8

                        "vmul.f32 q14, q1, d8[1] \n"
                        "vmul.f32 q15, q10, d10[1] \n"

                        "vext.32 q8, q0, q2, #2 \n"// q8 = 2 3 4 5
                        "vext.32 q11, q2, q3, #2 \n"// q11= 6 7 8 9

                        "vmla.f32 q12, q8, d9[0] \n"
                        "vmla.f32 q13, q11, d11[0] \n"

                        "vext.32 q9, q0, q2, #3 \n"// q9 = 3 4 5 6

                        "vmla.f32 q14, q9, d9[1] \n"

                        "pld [%9, #256] \n"
                        "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
                        "add %9, #28 \n"

                        "pld [%3, #128] \n"
                        "vld1.f32 {d0-d1}, [%3]! \n"
                        "vmla.f32 q15, q0, d12[0] \n"

                        "pld [%3, #256] \n"
                        "vld1.f32 {d4-d7}, [%3] \n"
                        "vmla.f32 q12, q2, d14[0] \n"

                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"

                        "vmla.f32 q13, q1, d12[1] \n"
                        "vmla.f32 q14, q10, d14[1] \n"

                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"

                        "vmla.f32 q15, q8, d13[0] \n"
                        "vmla.f32 q12, q11, d15[0] \n"

                        "vext.32 q9, q0, q2, #3 \n"

                        "vmla.f32 q13, q9, d13[1] \n"

                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
                        "add %9, #28 \n"

                        "pld [%4, #128] \n"
                        "vld1.f32 {d0-d1}, [%4]! \n"
                        "vmla.f32 q14, q0, d8[0] \n"

                        "pld [%4, #256] \n"
                        "vld1.f32 {d4-d7}, [%4] \n"
                        "vmla.f32 q15, q2, d10[0] \n"

                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"

                        "vmla.f32 q12, q1, d8[1] \n"
                        "vmla.f32 q13, q10, d10[1] \n"

                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"

                        "vmla.f32 q14, q8, d9[0] \n"
                        "vmla.f32 q15, q11, d11[0] \n"

                        "vext.32 q9, q0, q2, #3 \n"

                        "vmla.f32 q12, q9, d9[1] \n"

                        "pld [%9, #256] \n"
                        "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
                        "add %9, #28 \n"

                        "pld [%5, #128] \n"
                        "vld1.f32 {d0-d1}, [%5]! \n"
                        "vmla.f32 q13, q0, d12[0] \n"

                        "pld [%5, #256] \n"
                        "vld1.f32 {d4-d7}, [%5] \n"
                        "vmla.f32 q14, q2, d14[0] \n"

                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"

                        "vmla.f32 q15, q1, d12[1] \n"
                        "vmla.f32 q12, q10, d14[1] \n"

                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"

                        "vmla.f32 q13, q8, d13[0] \n"
                        "vmla.f32 q14, q11, d15[0] \n"

                        "vext.32 q9, q0, q2, #3 \n"

                        "vmla.f32 q15, q9, d13[1] \n"

                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
                        "add %9, #28 \n"

                        "pld [%6, #128] \n"
                        "vld1.f32 {d0-d1}, [%6]! \n"
                        "vmla.f32 q12, q0, d8[0] \n"

                        "pld [%6, #256] \n"
                        "vld1.f32 {d4-d7}, [%6] \n"
                        "vmla.f32 q13, q2, d10[0] \n"

                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"

                        "vmla.f32 q14, q1, d8[1] \n"
                        "vmla.f32 q15, q10, d10[1] \n"

                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"

                        "vmla.f32 q12, q8, d9[0] \n"
                        "vmla.f32 q13, q11, d11[0] \n"

                        "vext.32 q9, q0, q2, #3 \n"

                        "vmla.f32 q14, q9, d9[1] \n"

                        "pld [%9, #256] \n"
                        "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
                        "add %9, #28 \n"

                        "pld [%7, #128] \n"
                        "vld1.f32 {d0-d1}, [%7]! \n"
                        "vmla.f32 q15, q0, d12[0] \n"

                        "pld [%7, #256] \n"
                        "vld1.f32 {d4-d7}, [%7] \n"
                        "vmla.f32 q12, q2, d14[0] \n"

                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"

                        "vmla.f32 q13, q1, d12[1] \n"
                        "vmla.f32 q14, q10, d14[1] \n"

                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"

                        "vmla.f32 q15, q8, d13[0] \n"
                        "vmla.f32 q12, q11, d15[0] \n"

                        "vext.32 q9, q0, q2, #3 \n"

                        "vmla.f32 q13, q9, d13[1] \n"

                        "pld [%9, #256] \n"
                        "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
                        "sub %9, #168 \n"// restore k0

                        "pld [%8, #128] \n"
                        "vld1.f32 {d0-d1}, [%8]! \n"
                        "vmla.f32 q14, q0, d8[0] \n"

                        "pld [%8, #256] \n"
                        "vld1.f32 {d4-d7}, [%8] \n"
                        "vmla.f32 q15, q2, d10[0] \n"

                        "vext.32 q1, q0, q2, #1 \n"
                        "vext.32 q10, q2, q3, #1 \n"

                        "vmla.f32 q12, q1, d8[1] \n"
                        "vmla.f32 q13, q10, d10[1] \n"

                        "vext.32 q8, q0, q2, #2 \n"
                        "vext.32 q11, q2, q3, #2 \n"

                        "vmla.f32 q14, q8, d9[0] \n"
                        "vmla.f32 q15, q11, d11[0] \n"

                        "vext.32 q9, q0, q2, #3 \n"

                        "vmla.f32 q12, q9, d9[1] \n"

                        // fold the four partial accumulators into q12
                        "vadd.f32 q13, q13, q14 \n"
                        "vadd.f32 q13, q13, q15 \n"
                        "vadd.f32 q12, q12, q13 \n"

                        "vst1.f32 {d24-d25}, [%1]! \n"

                        "subs %0, #1 \n"
                        "bne 0b \n"

                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3), // %5
                        "=r"(r4), // %6
                        "=r"(r5), // %7
                        "=r"(r6), // %8
                        "=r"(k0) // %9
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "7"(r5),
                        "8"(r6),
                        "9"(k0)
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: one output column per iteration, full 7x7 sum
                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];
                    sum += r0[5] * k0[5];
                    sum += r0[6] * k0[6];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];
                    sum += r1[5] * k1[5];
                    sum += r1[6] * k1[6];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];
                    sum += r2[5] * k2[5];
                    sum += r2[6] * k2[6];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];
                    sum += r3[5] * k3[5];
                    sum += r3[6] * k3[6];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
                    sum += r4[5] * k4[5];
                    sum += r4[6] * k4[6];

                    sum += r5[0] * k5[0];
                    sum += r5[1] * k5[1];
                    sum += r5[2] * k5[2];
                    sum += r5[3] * k5[3];
                    sum += r5[4] * k5[4];
                    sum += r5[5] * k5[5];
                    sum += r5[6] * k5[6];

                    sum += r6[0] * k6[0];
                    sum += r6[1] * k6[1];
                    sum += r6[2] * k6[2];
                    sum += r6[3] * k6[3];
                    sum += r6[4] * k6[4];
                    sum += r6[5] * k6[5];
                    sum += r6[6] * k6[6];

                    // accumulate over input channels on top of the bias
                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    r6++;
                    outptr++;
                }

                // skip the 6-column overlap to reach the start of the next row
                r0 += 6;
                r1 += 6;
                r2 += 6;
                r3 += 6;
                r4 += 6;
                r5 += 6;
                r6 += 6;
            }
        }
    }
}
static void conv7x7s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*49 + q*49;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
const float* k0 = kernel0;
const float* k1 = kernel0 + 7;
const float* k2 = kernel0 + 14;
const float* k3 = kernel0 + 21;
const float* k4 = kernel0 + 28;
const float* k5 = kernel0 + 35;
const float* k6 = kernel0 + 42;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k4567 = vld1q_f32(k0 + 4);
float32x4_t _k78910 = vld1q_f32(k1);
float32x4_t _k11121314 = vld1q_f32(k1 + 4);
float32x4_t _k14151617 = vld1q_f32(k2);
float32x4_t _k18192021 = vld1q_f32(k2 + 4);
float32x4_t _k21222324 = vld1q_f32(k3);
float32x4_t _k25262728 = vld1q_f32(k3 + 4);
float32x4_t _k28293031 = vld1q_f32(k4);
float32x4_t _k32333435 = vld1q_f32(k4 + 4);
float32x4_t _k35363738 = vld1q_f32(k5);
float32x4_t _k39404142 = vld1q_f32(k5 + 4);
float32x4_t _k42434445 = vld1q_f32(k6);
float32x4_t _k46474849 = vld1q_f32(k6 + 4);
#ifdef __clang__ // __ARM_NEON && __aarch64__ && __clang__
if (nn > 0)
{
asm volatile(
// v0: input / final output
// v1 v2: = _ri0/_ri1 first
// v3 v4: = then _r0_8101214/_r0_9111315
// v5 = ri2 / ri4 / ri6
// v6 = ri3 / ri5
// v9 = intermediate sum register
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
//i = 1
"prfm pldl1keep, [%2, #512] \n"
"ld2 {v1.4s, v2.4s}, [%2] \n" // v1 v2 = _r00 _r01
"add %2, %2, #32 \n"
"ld2 {v3.4s, v4.4s}, [%2] \n" // v3 v4 = _r0_8101214 / _r0_9111315
"fmul v9.4s, v1.4s, %18.s[0] \n" // *+ _r00
"ext v5.16b, v1.16b, v3.16b, #4 \n" // v5 = _r02
"fmla v0.4s, v2.4s, %18.s[1] \n" // *+ _r01
"ext v6.16b, v2.16b, v4.16b, #4 \n" // v6 = _r03
"fmla v9.4s, v5.4s, %18.s[2] \n" // *+ _r02
"ext v5.16b, v1.16b, v3.16b, #8 \n" // v5 = _r04
"fmla v0.4s, v6.4s, %18.s[3] \n" // *+ _r03
"ext v6.16b, v2.16b, v4.16b, #8 \n" // v6 = _r05
"fmla v9.4s, v5.4s, %19.s[0] \n" // *+ _r04
"ext v5.16b, v1.16b, v3.16b, #12 \n" // v5 = _r06
"fmla v0.4s, v6.4s, %19.s[1] \n" // *+ _r05
"fmla v9.4s, v5.4s, %19.s[2] \n" // *+ _r06
//i = 2
"prfm pldl1keep, [%3, #512] \n"
"ld2 {v1.4s, v2.4s}, [%3] \n"
"add %3, %3, #32 \n"
"ld2 {v3.4s, v4.4s}, [%3] \n"
"fmla v9.4s, v1.4s, %20.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %20.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %20.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %20.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %21.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %21.s[1] \n"
"fmla v9.4s, v5.4s, %21.s[2] \n"
//i = 3
"prfm pldl1keep, [%4, #512] \n"
"ld2 {v1.4s, v2.4s}, [%4] \n"
"add %4, %4, #32 \n"
"ld2 {v3.4s, v4.4s}, [%4] \n"
"fmla v9.4s, v1.4s, %22.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %22.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %22.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %22.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %23.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %23.s[1] \n"
"fmla v9.4s, v5.4s, %23.s[2] \n"
//i = 4
"prfm pldl1keep, [%5, #512] \n"
"ld2 {v1.4s, v2.4s}, [%5] \n"
"add %5, %5, #32 \n"
"ld2 {v3.4s, v4.4s}, [%5] \n"
"fmla v9.4s, v1.4s, %24.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %24.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %24.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %24.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %25.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %25.s[1] \n"
"fmla v9.4s, v5.4s, %25.s[2] \n"
//i = 5
"prfm pldl1keep, [%6, #512] \n"
"ld2 {v1.4s, v2.4s}, [%6] \n"
"add %6, %6, #32 \n"
"ld2 {v3.4s, v4.4s}, [%6] \n"
"fmla v9.4s, v1.4s, %26.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %26.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %26.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %26.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %27.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %27.s[1] \n"
"fmla v9.4s, v5.4s, %27.s[2] \n"
//i = 6
"prfm pldl1keep, [%7, #512] \n"
"ld2 {v1.4s, v2.4s}, [%7] \n"
"add %7, %7, #32 \n"
"ld2 {v3.4s, v4.4s}, [%7] \n"
"fmla v9.4s, v1.4s, %28.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %28.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %28.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %28.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %29.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %29.s[1] \n"
"fmla v9.4s, v5.4s, %29.s[2] \n"
//i = 7
"prfm pldl1keep, [%8, #512] \n"
"ld2 {v1.4s, v2.4s}, [%8] \n"
"add %8, %8, #32 \n"
"ld2 {v3.4s, v4.4s}, [%8] \n"
"fmla v9.4s, v1.4s, %30.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %30.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %30.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %30.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %31.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %31.s[1] \n"
"fmla v9.4s, v5.4s, %31.s[2] \n"
"fadd v0.4s, v0.4s, v9.4s \n"
"st1 {v0.4s}, [%1], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6) // %8
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k78910), // %20
"w"(_k11121314), // %21
"w"(_k14151617), // %22
"w"(_k18192021), // %23
"w"(_k21222324), // %24
"w"(_k25262728), // %25
"w"(_k28293031), // %26
"w"(_k32333435), // %27
"w"(_k35363738), // %28
"w"(_k39404142), // %29
"w"(_k42434445), // %30
"w"(_k46474849) // %31
: "cc", "memory","v0", "v1", "v2", "v3", "v4", "v5", "v6", "v9"
);
}
#else // __ARM_NEON && __aarch64__ defined, but __clang__ not defined
// When compiled with gcc, gcc does not accept over 30 operands
for (; nn>0; nn--)
{
float32x4_t _sum = vld1q_f32(outptr);
float32x4x2_t _r00_02461357 = vld2q_f32(r0);
float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8);
float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14
float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15
float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6
float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7
float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8
float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9
float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10
float32x4_t _r05 = vextq_f32(_r01, _r0_9111315, 2);// 5 7 9 11
float32x4_t _r06 = vextq_f32(_r00, _r0_8101214, 3);// 6 8 10 12
_sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
_sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
_sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
_sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
_sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
_sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
_sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);
float32x4x2_t _r10_02461357 = vld2q_f32(r1);
float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8);
float32x4_t _r1_8101214 = _r10nx2.val[0];
float32x4_t _r1_9111315 = _r10nx2.val[1];
float32x4_t _r10 = _r10_02461357.val[0];
float32x4_t _r11 = _r10_02461357.val[1];
float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1);
float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1);
float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2);
float32x4_t _r15 = vextq_f32(_r11, _r1_9111315, 2);
float32x4_t _r16 = vextq_f32(_r10, _r1_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
_sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
_sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
_sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
_sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
_sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
_sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);
float32x4x2_t _r20_02461357 = vld2q_f32(r2);
float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8);
float32x4_t _r2_8101214 = _r20nx2.val[0];
float32x4_t _r2_9111315 = _r20nx2.val[1];
float32x4_t _r20 = _r20_02461357.val[0];
float32x4_t _r21 = _r20_02461357.val[1];
float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1);
float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1);
float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2);
float32x4_t _r25 = vextq_f32(_r21, _r2_9111315, 2);
float32x4_t _r26 = vextq_f32(_r20, _r2_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
_sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
_sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
_sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
_sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
_sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
_sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);
float32x4x2_t _r30_02461357 = vld2q_f32(r3);
float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8);
float32x4_t _r3_8101214 = _r30nx2.val[0];
float32x4_t _r3_9111315 = _r30nx2.val[1];
float32x4_t _r30 = _r30_02461357.val[0];
float32x4_t _r31 = _r30_02461357.val[1];
float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1);
float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1);
float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2);
float32x4_t _r35 = vextq_f32(_r31, _r3_9111315, 2);
float32x4_t _r36 = vextq_f32(_r30, _r3_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
_sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
_sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
_sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
_sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
_sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
_sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);
float32x4x2_t _r40_02461357 = vld2q_f32(r4);
float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8);
float32x4_t _r4_8101214 = _r40nx2.val[0];
float32x4_t _r4_9111315 = _r40nx2.val[1];
float32x4_t _r40 = _r40_02461357.val[0];
float32x4_t _r41 = _r40_02461357.val[1];
float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1);
float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1);
float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2);
float32x4_t _r45 = vextq_f32(_r41, _r4_9111315, 2);
float32x4_t _r46 = vextq_f32(_r40, _r4_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
_sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
_sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
_sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
_sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
_sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
_sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);
float32x4x2_t _r50_02461357 = vld2q_f32(r5);
float32x4x2_t _r50nx2 = vld2q_f32(r5 + 8);
float32x4_t _r5_8101214 = _r50nx2.val[0];
float32x4_t _r5_9111315 = _r50nx2.val[1];
float32x4_t _r50 = _r50_02461357.val[0];
float32x4_t _r51 = _r50_02461357.val[1];
float32x4_t _r52 = vextq_f32(_r50, _r5_8101214, 1);
float32x4_t _r53 = vextq_f32(_r51, _r5_9111315, 1);
float32x4_t _r54 = vextq_f32(_r50, _r5_8101214, 2);
float32x4_t _r55 = vextq_f32(_r51, _r5_9111315, 2);
float32x4_t _r56 = vextq_f32(_r50, _r5_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
_sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
_sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
_sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
_sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
_sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
_sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);
float32x4x2_t _r60_02461357 = vld2q_f32(r6);
float32x4x2_t _r60nx2 = vld2q_f32(r6 + 8);
float32x4_t _r6_8101214 = _r60nx2.val[0];
float32x4_t _r6_9111315 = _r60nx2.val[1];
float32x4_t _r60 = _r60_02461357.val[0];
float32x4_t _r61 = _r60_02461357.val[1];
float32x4_t _r62 = vextq_f32(_r60, _r6_8101214, 1);
float32x4_t _r63 = vextq_f32(_r61, _r6_9111315, 1);
float32x4_t _r64 = vextq_f32(_r60, _r6_8101214, 2);
float32x4_t _r65 = vextq_f32(_r61, _r6_9111315, 2);
float32x4_t _r66 = vextq_f32(_r60, _r6_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
_sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
_sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
_sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
_sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
_sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
_sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);
vst1q_f32(outptr, _sum);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
r6 += 8;
outptr += 4;
}
#endif // __clang__
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d26-d27}, [%1] \n"// _sum
// "veor q14, q14 \n"// _sum2 = 0;
// "veor q15, q15 \n"// _sum3 = 0;
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
"add %9, #28 \n"
"pld [%2, #512] \n"
"vld2.f32 {d0-d3}, [%2]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
"vmla.f32 q13, q0, d8[0] \n"
"vmul.f32 q14, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%2] \n"// q2 = 8 10 12 14 q3 = 9 11 13 15
"vext.32 q8, q0, q2, #1 \n"// q8 = 2 4 6 8
"vext.32 q9, q1, q3, #1 \n"// q9 = 3 5 7 9
"vmul.f32 q15, q8, d9[0] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"// q10= 4 6 8 10
"vext.32 q11, q1, q3, #2 \n"// q11= 5 7 9 11
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"// q12= 6 8 10 12
"vmla.f32 q13, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
"add %9, #28 \n"
"pld [%3, #512] \n"
"vld2.f32 {d0-d3}, [%3]! \n"
"vmla.f32 q14, q0, d12[0] \n"
"vmla.f32 q15, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%3] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q13, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q14, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
"add %9, #28 \n"
"pld [%4, #512] \n"
"vld2.f32 {d0-d3}, [%4]! \n"
"vmla.f32 q15, q0, d8[0] \n"
"vmla.f32 q13, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%4] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q15, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
"add %9, #28 \n"
"pld [%5, #512] \n"
"vld2.f32 {d0-d3}, [%5]! \n"
"vmla.f32 q13, q0, d12[0] \n"
"vmla.f32 q14, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%5] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q13, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
"add %9, #28 \n"
"pld [%6, #512] \n"
"vld2.f32 {d0-d3}, [%6]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"vmla.f32 q15, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%6] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q14, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
"add %9, #28 \n"
"pld [%7, #512] \n"
"vld2.f32 {d0-d3}, [%7]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"vmla.f32 q13, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%7] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q13, q10, d14[0] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q15, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
"sub %9, #168 \n"// restore k0
"pld [%8, #512] \n"
"vld2.f32 {d0-d3}, [%8]! \n"
"vmla.f32 q13, q0, d8[0] \n"
"vmla.f32 q14, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%8] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q13, q12, d11[0] \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q13, q13, q14 \n"
"vst1.f32 {d26-d27}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(k0) // %9
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r0[5] * k0[5];
sum += r0[6] * k0[6];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r1[5] * k1[5];
sum += r1[6] * k1[6];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r2[5] * k2[5];
sum += r2[6] * k2[6];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r3[5] * k3[5];
sum += r3[6] * k3[6];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum += r4[5] * k4[5];
sum += r4[6] * k4[6];
sum += r5[0] * k5[0];
sum += r5[1] * k5[1];
sum += r5[2] * k5[2];
sum += r5[3] * k5[3];
sum += r5[4] * k5[4];
sum += r5[5] * k5[5];
sum += r5[6] * k5[6];
sum += r6[0] * k6[0];
sum += r6[1] * k6[1];
sum += r6[2] * k6[2];
sum += r6[3] * k6[3];
sum += r6[4] * k6[4];
sum += r6[5] * k6[5];
sum += r6[6] * k6[6];
*outptr += sum;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
|
spbprop.h | //--------------------------------------------------------------------------------
// Copyright (c) 2017-2020, sanko-shoko. All rights reserved.
//--------------------------------------------------------------------------------
#ifndef __SP_BPROP_H__
#define __SP_BPROP_H__
#include "spcore/spcore.h"
namespace sp {
//--------------------------------------------------------------------------------
// belief propagation
//--------------------------------------------------------------------------------
// Loopy belief propagation (min-sum) on a general node/link graph with a
// Potts-style pairwise cost: link.cost is added exactly when the two
// endpoint labels differ.
class BeliefPropagation {
private:
struct Node;
struct Link;
struct Node {
// label (current best label, filled in by execute())
int label;
// per-label unary cost table (m_labelNum entries); pointer is stored,
// not copied -- caller must keep it alive until execute() returns
const int *cost;
// first link pointer (head of this node's singly-linked adjacency list)
Link *link;
};
struct Link {
// id (also the message-row index into m_buff)
int id;
// cost (penalty applied when the endpoint labels differ)
int cost;
// connected node (root -> targ)
Node *root;
// connected node the link points to (root -> targ)
Node *targ;
// next link
Link *next;
// pair link (<=>) : same edge, opposite direction
Link *pair;
};
// number of labels per node
int m_labelNum;
Mem1<Node> m_nodes;
// two directed Link records per undirected edge
Mem1<Link> m_links;
// messages: m_buff(directed link id, label, ping/pong slot)
Mem3<int> m_buff;
public:
BeliefPropagation() {
m_labelNum = 0;
}
BeliefPropagation(const int labelNum, const int nodeMax, const int linkMax) {
init(labelNum, nodeMax, linkMax);
}
// Allocate storage for up to nodeMax nodes and linkMax undirected edges.
void init(const int labelNum, const int nodeMax, const int linkMax) {
m_labelNum = labelNum;
m_nodes.clear();
m_nodes.resize(nodeMax);
m_nodes.zero();
m_links.clear();
m_links.reserve(2 * linkMax);
// one message row per directed link, one column per label, 2 ping-pong slots
m_buff.resize(2 * linkMax, labelNum, 2);
m_buff.zero();
}
// Attach the unary cost table of node i (label reset to 0).
void setNode(const int i, const int *cost) {
Node *node = &m_nodes[i];
node->label = 0;
node->cost = cost;
}
// Connect nodes i and j with pairwise cost `cost`, creating both
// directed links and pushing each onto its root node's adjacency list.
void setLink(int i, int j, int cost) {
const int id = m_links.size();
Link *linkAB = m_links.extend();
Link *linkBA = m_links.extend();
Node* A = &m_nodes[i];
Node* B = &m_nodes[j];
linkAB->id = id;
linkAB->next = A->link;
linkAB->pair = linkBA;
linkAB->root = A;
linkAB->targ = B;
linkAB->cost = cost;
A->link = linkAB;
linkBA->id = id + 1;
linkBA->next = B->link;
linkBA->pair = linkAB;
linkBA->root = B;
linkBA->targ = A;
linkBA->cost = cost;
B->link = linkBA;
}
// Label chosen for node i by the last execute() call.
int getLabel(int i) {
return m_nodes[i].label;
}
//--------------------------------------------------------------------------------
// execute
//--------------------------------------------------------------------------------
// Run itmax rounds of synchronous message passing, then select per-node labels.
void execute(const int itmax = 10) {
for (int it = 0; it < itmax; it++) {
// s: slot holding last iteration's messages, d: slot being written
const int s = it % 2;
const int d = 1 - s;
#if SP_USE_OMP
#pragma omp parallel for
#endif
for (int i = 0; i < m_links.size(); i++) {
Link &link = m_links[i];
Node *node = link.root;
for(int a = 0; a < m_labelNum; a++){
int minv = SP_INTMAX;
for (int b = 0; b < m_labelNum; b++) {
// unary cost of label b at the sending node, plus the
// pairwise penalty if the receiving label a differs
int c = node->cost[b];
if(a != b) c += link.cost;
// add every message arriving at the sending node.
// NOTE(review): the message coming back through this very
// edge (n == link.pair's reverse) is NOT excluded here;
// standard min-sum BP excludes it -- confirm intended.
Link *n = node->link;
while (n != NULL) {
c += m_buff(n->pair->id, b, s);
n = n->next;
}
if (c < minv) {
minv = c;
}
}
m_buff(i, a, d) = minv;
}
}
}
{
// final beliefs: unary cost plus all incoming messages; argmin over labels
const int s = itmax % 2;
for (int i = 0; i < m_nodes.size(); i++) {
Node &node = m_nodes[i];
int minv = SP_INTMAX;
for (int a = 0; a < m_labelNum; a++) {
int c = node.cost[a];
Link *n = node.link;
while (n != NULL) {
c += m_buff(n->pair->id, a, s);
n = n->next;
}
if (c < minv) {
minv = c;
node.label = a;
}
}
}
}
}
public:
};
}
#endif |
GB_binop__min_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint32)
// A*D function (colscale): GB (_AxD__min_uint32)
// D*A function (rowscale): GB (_DxB__min_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__min_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__min_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint32)
// C=scalar+B GB (_bind1st__min_uint32)
// C=scalar+B' GB (_bind1st_tran__min_uint32)
// C=A+scalar GB (_bind2nd__min_uint32)
// C=A'+scalar GB (_bind2nd_tran__min_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_IMIN (aij, bij)
// Type and operator plumbing consumed by the template files #included below.
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// NOTE(review): the trailing backslash after "0" splices the next comment
// line into this macro; harmless (comments become whitespace before macro
// expansion, so the macro still expands to 0), but likely a generator quirk.
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// access entry p of the C value array
#define GB_CX(p) Cx [p]
// binary operator: z = min (x, y); the i,j indices are unused by MIN
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMIN (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_UINT32 || GxB_NO_MIN_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C += A+B where all three matrices are dense; the loop body lives in the template.
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B where all three matrices are dense; the loop body lives in the template.
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// C += B: accumulate sparse B into dense C, parallelized over the B slices.
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__min_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned; kept by
// the code generator for structural symmetry with the other kernels.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A*D: scale each column of A by the matching diagonal entry of D.
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = D*B: scale each row of B by the matching diagonal entry of D.
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__min_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseAdd C = A+B (optionally masked), with the MIN operator.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
// alpha/beta are only unpacked for eWiseUnion; presumably the template
// reads them only in that case, leaving them unused otherwise.
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__min_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult C = A.*B where C is sparse/hypersparse (method 08).
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult C = A.*B where A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for MIN (commutative), so the flipped path is compiled out.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult C<M> = A.*B with M sparse/hyper and A, B bitmap/full.
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__min_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult where the result C is held in bitmap form.
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__min_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = min (x, Bx [p]) for every entry present in B.
uint32_t *cx = (uint32_t *) Cx_output ;
uint32_t *bx = (uint32_t *) Bx_input ;
const uint32_t xval = (*((uint32_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only entries present in the bitmap of B are computed
if (GBB (Bb, p))
{
uint32_t bij = GBX (bx, p, false) ;
cx [p] = GB_IMIN (xval, bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__min_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = min (Ax [p], y) for every entry present in A.
uint32_t *cx = (uint32_t *) Cx_output ;
uint32_t *ax = (uint32_t *) Ax_input ;
const uint32_t yval = (*((uint32_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only entries present in the bitmap of A are computed
if (GBB (Ab, p))
{
uint32_t aij = GBX (ax, p, false) ;
cx [p] = GB_IMIN (aij, yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
// C = op (x, A'): transpose A and apply z = min (x, aij) entrywise.
GrB_Info GB (_bind1st_tran__min_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
// C = op (A', y): transpose A and apply z = min (aij, y) entrywise.
GrB_Info GB (_bind2nd_tran__min_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_task_detach.c | <ompts:test>
<ompts:testdescription>Test which checks the detach clause of the omp task directive.</ompts:testdescription>
<ompts:ompversion>5.0</ompts:ompversion>
<ompts:directive>omp task detach,omp_fulfill_event</ompts:directive>
<ompts:dependences>omp task, omp atomic</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
<ompts:orphan:vars>
int result1 = 0;
int result2 = 0;
</ompts:orphan:vars>
// Verifies the OpenMP 5.0 task detach clause with omp_fulfill_event().
// Expected on success: result1 == 1 and result2 == 4 (ordering notes below).
int <ompts:testcode:functionname>omp_task_detach</ompts:testcode:functionname>(FILE * logFile){
result1 = 0;
result2 = 0;
#pragma omp parallel
{
#pragma omp master
{
<ompts:orphan>
omp_event_handle_t event;
// Test 1: the detached task fulfills its own event, so the taskwait
// below must return only after result1 has been set.
#pragma omp task shared(result1) <ompts:check>detach(event)</ompts:check>
{
<ompts:check>omp_fulfill_event(event);</ompts:check>
result1 = 1;
}
</ompts:orphan>
#pragma omp taskwait
}
#pragma omp master
{
<ompts:crosscheck>int dummy = 0;</ompts:crosscheck>
omp_event_handle_t event;
// Test 2: the second task depends on this detached task, and this
// task only completes once the third task calls omp_fulfill_event.
// Hence both atomic increments run before the doubling:
// (0 + 1 + 1) * 2 == 4.
#pragma omp task depend(out: result2) shared(result2) <ompts:check>detach(event)</ompts:check>
{
#pragma omp atomic
result2++;
}
#pragma omp task depend(out:result2) shared(result2) <ompts:crosscheck>depend(in:dummy)</ompts:crosscheck>
{
result2 *= 2;
}
// Sleeps so the detached task's body has clearly finished executing
// before the event is fulfilled, then releases it.
#pragma omp task shared(result2) <ompts:crosscheck>depend(out:dummy)</ompts:crosscheck>
{
my_sleep (SLEEPTIME);
#pragma omp atomic
result2++;
<ompts:check>omp_fulfill_event(event);</ompts:check>
}
#pragma omp taskwait
}
}
printf("result1: %d\n", result1);
printf("result2: %d\n", result2);
return (result1 == 1) && (result2 == 4);
}
</ompts:testcode>
</ompts:test>
|
embedded_postprocess_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Ruben Zorrilla
//
//
#ifndef KRATOS_EMBEDDED_POSTPROCESS_PROCESS_H
#define KRATOS_EMBEDDED_POSTPROCESS_PROCESS_H
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "processes/process.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
// Application includes
namespace Kratos
{
///@addtogroup FluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Utility to filter the embedded velocity and pressure values
class EmbeddedPostprocessProcess : public Process
{
public:
///@name Type Definitions
///@{
/// Pointer definition of EmbeddedPostprocessProcess
KRATOS_CLASS_POINTER_DEFINITION(EmbeddedPostprocessProcess);
///@}
///@name Life Cycle
///@{
/// Constructor.
/// @param rModelPart model part whose nodes are postprocessed (the reference is stored)
EmbeddedPostprocessProcess(ModelPart& rModelPart) : mrModelPart(rModelPart)
{
}
/// Destructor.
~EmbeddedPostprocessProcess() override{}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
/// Copies PRESSURE and VELOCITY of the positive-distance (wet) nodes into
/// EMBEDDED_WET_PRESSURE and EMBEDDED_WET_VELOCITY; negative-distance nodes
/// are zeroed so that only the wet domain shows in the visualization.
void ExecuteFinalizeSolutionStep() override
{
ModelPart::NodesContainerType& rNodes = mrModelPart.Nodes();
// Guard: on an empty model part there is nothing to do, and the variable
// checks below would dereference NodesBegin() == NodesEnd() (UB).
if (rNodes.size() == 0)
{
return;
}
const array_1d<double, 3> aux_zero = ZeroVector(3);
// Simple check
if( mrModelPart.NodesBegin()->SolutionStepsDataHas( DISTANCE ) == false )
KRATOS_ERROR << "Nodes do not have DISTANCE variable!";
if( mrModelPart.NodesBegin()->SolutionStepsDataHas( PRESSURE ) == false )
KRATOS_ERROR << "Nodes do not have PRESSURE variable!";
if( mrModelPart.NodesBegin()->SolutionStepsDataHas( VELOCITY ) == false )
KRATOS_ERROR << "Nodes do not have VELOCITY variable!";
if( mrModelPart.NodesBegin()->SolutionStepsDataHas( EMBEDDED_WET_PRESSURE ) == false )
KRATOS_ERROR << "Nodes do not have EMBEDDED_WET_PRESSURE variable!";
if( mrModelPart.NodesBegin()->SolutionStepsDataHas( EMBEDDED_WET_VELOCITY ) == false )
KRATOS_ERROR << "Nodes do not have EMBEDDED_WET_VELOCITY variable!";
// Embedded postprocess variables set
#pragma omp parallel for
for (int k = 0; k < static_cast<int>(rNodes.size()); ++k)
{
ModelPart::NodesContainerType::iterator itNode = rNodes.begin() + k;
const double dist = itNode->FastGetSolutionStepValue(DISTANCE);
double& emb_wet_pres = itNode->FastGetSolutionStepValue(EMBEDDED_WET_PRESSURE);
array_1d<double, 3>& emb_wet_vel = itNode->FastGetSolutionStepValue(EMBEDDED_WET_VELOCITY);
if (dist >= 0.0)
{
emb_wet_pres = itNode->FastGetSolutionStepValue(PRESSURE); // Store the positive distance nodes PRESSURE in EMBEDDED_WET_PRESSURE variable
emb_wet_vel = itNode->FastGetSolutionStepValue(VELOCITY); // Store the positive distance nodes VELOCITY in EMBEDDED_WET_VELOCITY variable
}
else
{
emb_wet_pres = 0.0; // The negative distance nodes EMBEDDED_WET_PRESSURE is set to zero for visualization purposes
emb_wet_vel = aux_zero; // The negative distance nodes EMBEDDED_WET_VELOCITY is set to zero for visualization purposes
}
}
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "EmbeddedPostprocessProcess" ;
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override {rOStream << "EmbeddedPostprocessProcess";}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
// Model part on which the postprocess operates (held by reference).
ModelPart& mrModelPart;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Default constructor.
EmbeddedPostprocessProcess() = delete;
/// Assignment operator.
EmbeddedPostprocessProcess& operator=(EmbeddedPostprocessProcess const& rOther) = delete;
/// Copy constructor.
EmbeddedPostprocessProcess(EmbeddedPostprocessProcess const& rOther) = delete;
///@}
}; // Class EmbeddedPostprocessProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
///@} addtogroup block
}; // namespace Kratos.
#endif // KRATOS_EMBEDDED_POSTPROCESS_PROCESS_H
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node
  /// but it cannot be converted to the specified type.
  template <typename T> const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }

  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a
  /// value type of \c clang::DynTypedNode.
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;

  /// Retrieve the full mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); }

private:
  // Only the match-finder machinery may construct instances.
  friend class internal::BoundNodesTreeBuilder;

  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}

  internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile())
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // A node counts as "in the main file" when the expansion location of its
  // first token lies inside the main translation-unit file.
  const SourceManager &SM = Finder->getASTContext().getSourceManager();
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader())
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Resolve where the node's first token was macro-expanded; an invalid
  // location can never be attributed to a system header.
  const SourceManager &SM = Finder->getASTContext().getSourceManager();
  SourceLocation ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  return ExpansionLoc.isValid() && SM.isInSystemHeader(ExpansionLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*"))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, RegExp) {
  // Locate the file containing the expansion of the node's begin location
  // and test its name against the user-supplied regular expression.
  const SourceManager &SM = Finder->getASTContext().getSourceManager();
  SourceLocation ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  if (ExpansionLoc.isInvalid())
    return false;
  const auto *Entry = SM.getFileEntryForID(SM.getFileID(ExpansionLoc));
  if (!Entry)
    return false;
  // NOTE: the regex is recompiled on every candidate node, as in the
  // original implementation.
  return llvm::Regex(RegExp).match(Entry->getName());
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
///
/// FIXME: Change to be a polymorphic matcher that works on any syntactic
/// node. There's nothing `Stmt`-specific about it.
AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
  // The statement matches only when both its first and its last token come
  // from the same expansion of the named macro.
  ASTContext &Ctx = Finder->getASTContext();
  llvm::Optional<SourceLocation> BeginLoc =
      internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Ctx);
  if (!BeginLoc)
    return false;
  llvm::Optional<SourceLocation> EndLoc =
      internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Ctx);
  return EndLoc && *BeginLoc == *EndLoc;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPublic())
/// matches 'int a;'
AST_MATCHER(Decl, isPublic) {
  // True when the declaration's effective access specifier is 'public'.
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_public;
}
/// Matches protected C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isProtected())
/// matches 'int b;'
AST_MATCHER(Decl, isProtected) {
  // True when the declaration's effective access specifier is 'protected'.
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_protected;
}
/// Matches private C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPrivate())
/// matches 'int c;'
AST_MATCHER(Decl, isPrivate) {
  // True when the declaration's effective access specifier is 'private'.
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
// Thin forwarder: FieldDecl::isBitField() is true iff the member declares an
// explicit bit width (e.g. 'int a : 2;').
return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Members that are not bit-fields never match, regardless of Width.
  if (!Node.isBitField())
    return false;
  // Evaluate the (possibly value-dependent) bit-width expression and compare.
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only fields that carry an in-class initializer (NSDMI) can match; the
  // initializer expression is forwarded to the inner matcher.
  if (const Expr *Init = Node.getInClassInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) {
// Thin forwarder: delegates entirely to FunctionDecl::isMain().
return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  // Match against the class template this specialization was stamped from;
  // a missing template (null) never matches.
  if (const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate())
    return InnerMatcher.matches(*Specialized, Finder, Builder);
  return false;
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
// Thin forwarder: Decl::isImplicit() is true for declarations synthesized by
// the compiler rather than written in the source.
return Node.isImplicit();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType, FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Succeeds when at least one template argument of the specialization
  // satisfies the inner matcher.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, Args.begin(), Args.end(), Finder,
                             Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
// Wraps InnerMatcher in a TraversalMatcher so that it — and every matcher
// nested below it — is evaluated with traversal kind TK, then converts the
// type-erased wrapper back to a Matcher<T> restricted to InnerMatcher's
// supported node kind.
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
const internal::Matcher<T> &InnerMatcher) {
return internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
InnerMatcher.getID().first)
.template unconditionalConvertTo<T>();
}
// Overload of traverse() for bindable matchers: applies the same traversal
// wrapping as the Matcher<T> overload, but re-wraps the result in a
// BindableMatcher<T> so that .bind("id") remains available on the result.
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
return internal::BindableMatcher<T>(
internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
InnerMatcher.getID().first)
.template unconditionalConvertTo<T>());
}
// Overload of traverse() for variadic-operator matchers: defers the actual
// wrapping by pairing TK with the operator in a TraversalWrapper, which is
// unwrapped when the operator is later converted to a concrete matcher.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
TK, InnerMatcher);
}
// Overload of traverse() for argument-adapting matcher adaptors: like the
// variadic-operator overload, it only records TK alongside the adaptor in a
// TraversalWrapper for later unwrapping.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename T, typename ToTypes>
internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
return internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
ToTypes>>(TK, InnerMatcher);
}
// Overload of traverse() for single-parameter polymorphic matchers: records
// TK next to the polymorphic matcher in a TraversalWrapper for later
// unwrapping during conversion to a concrete matcher.
template <template <typename T, typename P1> class MatcherT, typename P1,
typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
MatcherT, P1, ReturnTypesF> &InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
TK, InnerMatcher);
}
// Overload of traverse() for two-parameter polymorphic matchers; mirrors the
// single-parameter overload above.
template <template <typename T, typename P1, typename P2> class MatcherT,
typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) {
  // Strip implicit AST nodes before matching; parentheses and explicit
  // casts are preserved.
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Strip implicit casts before matching; parentheses and explicit casts
  // are preserved.
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip parentheses and all casts (implicit and explicit) before matching.
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip parentheses and implicit casts before matching; explicit casts
  // are preserved.
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // Drop ParenType sugar before handing the type to the inner matcher.
  QualType Unparenthesized = Node.IgnoreParens();
  return InnerMatcher.matches(Unparenthesized, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  // Strip ParenExpr nodes only; casts of any kind stay in place.
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T()); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T())
AST_MATCHER(Expr, isInstantiationDependent) {
// Thin forwarder: delegates to Expr::isInstantiationDependent().
return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
// Thin forwarder: delegates to Expr::isTypeDependent().
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
// Thin forwarder: delegates to Expr::isValueDependent().
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType, FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Out-of-range indices never match; otherwise forward the N'th argument
  // to the inner matcher.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  // Compare the specialization's template-argument count against N.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return Args.size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Only type template arguments can match; other kinds are rejected
  // outright.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template-template arguments can match; other kinds are rejected
  // outright.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Only declaration-kind (canonical) arguments can match.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only expression-kind (sugar) arguments can match.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
// True iff the argument's kind is TemplateArgument::Integral; no value
// comparison is performed (see equalsIntegralValue for that).
return Node.getKind() == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only integral arguments carry an integral type to hand to the inner
  // matcher.
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) {
  // Non-integral arguments never match.
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  // Compare the canonical base-10 rendering of the APSInt against Value.
  return Value == Node.getAsIntegral().toString(10);
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) {
  // Delegate directly to the AST node's own ADL-resolution flag.
  return Node.usesADL();
}
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if expression have it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Semantic-only init lists have no syntactic form and never match.
  if (const Expr *Syntactic = Node.getSyntacticForm())
    return InnerMatcher.matches(*Syntactic, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The increment clause may be absent, e.g. `for (;;)`.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init clause may be absent, e.g. `for (; i < N; ++i)`.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable of a range-based for statement.
///
/// Example:
///     cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // A dependent range-based for may not yet have a loop variable.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range initialization expression of a range-based for statement.
///
/// Example:
///     cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // The range expression can be null in dependent contexts.
  if (const Expr *RangeInit = Node.getRangeInit())
    return InnerMatcher.matches(*RangeInit, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *bar;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
///     staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain exactly \c N
/// designators.
///
/// Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
///   point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
///   matches '{ [2].y = 1.0, [0].x = 1.0 }',
///   but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // DesignatedInitExpr::size() reports the number of designators.
  const unsigned NumDesignators = Node.size();
  return NumDesignators == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatchers.
///
/// However, \c optionally will generate a result binding for each matching
/// submatcher.
///
/// Useful when additional information which may or may not present about a
/// main matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
1, std::numeric_limits<unsigned>::max()>
optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions (sizeof/alignof/vec_step) whose argument has a
/// type matching the given type matcher.
///
/// Given
/// \code
///   int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
///   matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // getTypeOfArgument() handles both the "sizeof(type)" and the
  // "sizeof expression" spellings.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
///   matches \c sizeof(x)
///
/// If the matcher is used from clang-query, the UnaryExprOrTypeTrait
/// parameter should be passed as a quoted string, e.g. ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Kind == Node.getKind();
}
/// Same as unaryExprOrTypeTraitExpr, but restricted to alignof
/// (including the non-standard preferred-alignof form).
inline internal::Matcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Accept both UETT_AlignOf and UETT_PreferredAlignOf so that
  // __alignof__ is covered as well.
  auto IsAlignofKind = anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf));
  return stmt(unaryExprOrTypeTraitExpr(allOf(IsAlignofKind, InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but restricted to sizeof.
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  auto IsSizeofKind = ofKind(UETT_SizeOf);
  return stmt(unaryExprOrTypeTraitExpr(allOf(IsSizeofKind, InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // HasNameMatcher takes a list of candidate names; here it is a single one.
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({Name.str()}));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prepend "::" so anchored patterns like "::X" also match top-level names.
  llvm::Regex RE(RegExp);
  return RE.match("::" + Node.getQualifiedNameAsString());
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
///   class A { int operator*(); };
///   const A &operator<<(const A &a, const A &b);
///   A a;
///   a << a;   // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, StringRef,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  // A single polymorphic matcher instantiation serves both operator-call
  // expressions and function declarations; Name is the bare spelling
  // (without the "operator" prefix).
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, StringRef,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name);
}
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
///   @interface NSObject @end
///   @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // C++ struct/union/class: walk the (possibly indirect) base-class chain.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/false);
  // Otherwise the node must be an Objective-C class.
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match anything.
  if (BaseName.empty())
    return false;
  const auto DerivedMatcher = isDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(DerivedMatcher)
        .matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(DerivedMatcher)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // The class itself counts, hence the anyOf with the bare Base matcher.
  const auto SameOrDerived = anyOf(Base, isDerivedFrom(Base));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(SameOrDerived)
        .matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(SameOrDerived)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match anything.
  if (BaseName.empty())
    return false;
  const auto SameOrDerived = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(SameOrDerived)
        .matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(SameOrDerived)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // C++ struct/union/class: only immediate bases qualify (Directly=true).
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/true);
  // Otherwise the node must be an Objective-C class.
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match anything.
  if (BaseName.empty())
    return false;
  const auto DirectMatcher = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(DirectMatcher)
        .matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(DirectMatcher)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Stops at the first method for which InnerMatcher succeeds.
  auto First = Node.method_begin();
  auto Last = Node.method_end();
  return matchesFirstInPointerRange(InnerMatcher, First, Last, Finder,
                                    Builder);
}
/// Matches the closure type that the compiler generates for a lambda
/// expression.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node itself or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
///   class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(hasName("::A"),
///                 findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // eachOf reports both the node itself and every matching descendant.
  auto Descendants = forEachDescendant(Matcher);
  return eachOf(Matcher, Descendants);
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
///   class X {};
///   typedef X Y;
///   Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
///   varDecl(hasType(hasUnqualifiedDesugaredType(
///       recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
///   Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
///   Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
///   Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
///   Matcher<TagType>, Matcher<TemplateSpecializationType>,
///   Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
///   Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // A single polymorphic instantiation covers every node kind listed in
  // HasDeclarationSupportedTypes; the inner Decl matcher is applied to the
  // declaration that HasDeclarationMatcher resolves for each node kind.
  return internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
///   namespace N { template<class T> void f(T t); }
///   template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
///     namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
///   matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // getUnderlyingDecl() strips using-declarations and the like.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y {};
///   void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
///   matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
///   matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip parens and implicit casts so the matcher sees the written object.
  const Expr *Object = Node.getImplicitObjectArgument()->IgnoreParenImpCasts();
  return Object != nullptr && InnerMatcher.matches(*Object, Finder, Builder);
}
/// Matches on the receiver of an Objective-C message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
///   NSString *webViewJavaScript = ...
///   UIWebView *webView = ...
///   [webView stringByEvaluatingJavaScriptFromString:webViewJavascript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}
/// Matches Objective-C method declarations that are class methods
/// (declared with '+').
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
///   @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
///   @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  return Node.isClassMethod();
}
/// Matches Objective-C method declarations that are instance methods
/// (declared with '-').
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
///   @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
///   @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  return Node.isInstanceMethod();
}
/// Matches Objective-C message expressions whose receiver is a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  return Node.isClassMessage();
}
/// Matches Objective-C message expressions whose receiver is an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// but not
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // getInstanceReceiver() is null for class messages.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  Selector Sel = Node.getSelector();
  // Idiomatic equality comparison instead of compare(...) == 0.
  return BaseName == Sel.getAsString();
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
StringRef,
internal::hasAnySelectorFunc>
hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
  assert(!RegExp.empty());
  llvm::Regex RE(RegExp);
  return RE.match(Node.getSelector().getAsString());
}
/// Matches when the selector is the empty selector.
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  return Node.getSelector().isNull();
}
/// Matches when the selector is a unary selector (no arguments, no colon).
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector (takes arguments,
/// uses colons).
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
///   UIWebView *webView = ...;
///   CGRect bodyFrame = webView.frame;
///   bodyFrame.size.height = self.bodyContentHeight;
///   webView.frame = bodyFrame;
///   //     ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments.
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  return N == Node.getSelector().getNumArgs();
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
///   class Y { void x() { this->x(); x(); Y y; y.x(); } };
///   void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
///   matches this->x(), x(), y.x(), f()
/// with callee(...)
///   matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Expr *Callee = Node.getCallee();
  return Callee != nullptr && InnerMatcher.matches(*Callee, Finder, Builder);
}
/// Matches if the declaration of the call expression's callee matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
///                                    cxxMethodDecl(hasName("x")))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Delegates to hasDeclaration, which resolves the callee's declaration.
  const auto CalleeDeclMatcher = callExpr(hasDeclaration(InnerMatcher));
  return CalleeDeclMatcher.matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
///             and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
///             and U (matcher = typedefDecl(hasType(asString("int")))
///             and friend class X (matcher = friendDecl(hasType("X"))
/// \code
///   class X {};
///   void y(X &x) { x; X z; }
///   typedef int U;
///   class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // getUnderlyingType returns a null QualType for nodes without one.
  const QualType NodeType = internal::getUnderlyingType(Node);
  return !NodeType.isNull() && InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
///             and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
///             and friend class X (matcher = friendDecl(hasType("X"))
/// \code
///   class X {};
///   void y(X &x) { x; X z; }
///   class Y { friend class X; };
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // Resolve the node's type, then match the declaration behind that type.
  const QualType NodeType = internal::getUnderlyingType(Node);
  return !NodeType.isNull() &&
         qualType(hasDeclaration(InnerMatcher))
             .matches(NodeType, Finder, Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
///   int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
///   matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  const TypeSourceInfo *TSI = Node.getTypeSourceInfo();
  // Implicit declarations (e.g. implicit destructors) carry no written type.
  if (TSI == nullptr)
    return false;
  return Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
///   class Y { public: void x(); };
///   void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
///   matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  return Node.getAsString() == Name;
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
///   (matcher = cxxMemberCallExpr(on(hasType(pointsTo
///      cxxRecordDecl(hasName("Y")))))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Guard against null QualTypes before dereferencing.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
InnerMatcher, 1) {
return pointsTo(qualType(hasDeclaration(InnerMatcher)))
.matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
InnerMatcher) {
return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // getPointeeType returns the referenced type for reference types.
  return (!Node.isNull() && Node->isReferenceType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Forwards to the QualType overload via hasDeclaration.
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept either a direct type match or a pointer-to-matching-type, so the
  // matcher works for both `obj.m()` and `ptr->m()`.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  // Run the inner matcher on the referenced declaration, if any.
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references resolved through a using-shadow declaration qualify;
  // direct (qualified) references have a non-shadow found decl.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Stops at the first overload candidate the inner matcher accepts.
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  if (Node.isSingleDecl()) {
    const Decl *FoundDecl = Node.getSingleDecl();
    return InnerMatcher.matches(*FoundDecl, Finder, Builder);
  }
  return false;
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // getAnyInitializer also finds an initializer attached to another
  // redeclaration of the same variable.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                          CXXConstructExpr,
                                                          ObjCMessageExpr),
                          unsigned, N) {
  return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  // Parentheses and implicit casts around the argument are skipped before
  // running the inner matcher.
  return (N < Node.getNumArgs() &&
          InnerMatcher.matches(
              *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder));
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
/// int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  return N < Node.getNumInits() &&
         InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Bounds-check first: decl iterators are forward-only, so count them once.
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
  std::advance(Iterator, N);
  return InnerMatcher.matches(**Iterator, Finder, Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // A catch-all handler has no exception declaration.
  return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                    Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // getAnyMember is null for base-class initializers, which have no field.
  if (const FieldDecl *Field = Node.getAnyMember())
    return InnerMatcher.matches(*Field, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  // The init expression may be absent, e.g. on a delegating initializer
  // that was not yet built.
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Arg : Node.arguments()) {
    // Match against a copy of the builder so bindings from arguments that
    // ultimately fail to match are discarded; only the first successful
    // argument's bindings are kept.
    BoundNodesTreeBuilder Result(*Builder);
    if (InnerMatcher.matches(*Arg, Finder, &Result)) {
      *Builder = std::move(Result);
      return true;
    }
  }
  return false;
}
/// Matches any capture of a lambda expression.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
/// matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  for (const LambdaCapture &Capture : Node.captures()) {
    // Only variable captures have a VarDecl to match against; 'this' and
    // VLA captures are skipped here.
    if (Capture.capturesVariable()) {
      // Use a scratch builder so bindings from failed captures are dropped.
      BoundNodesTreeBuilder Result(*Builder);
      if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) {
        *Builder = std::move(Result);
        return true;
      }
    }
  }
  return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
/// struct foo {
/// void bar() {
/// auto f = [this](){};
/// }
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
/// matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  // Note: the inner matcher is not invoked; any 'this' capture matches.
  return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) {
    return LC.capturesThis();
  });
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  return (N < Node.parameters().size()
          && InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates one bound-nodes entry per matching argument/parameter pair.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  // ParamIndex starts at 0 even when ArgIndex starts at 1, since parameter
  // lists do not include the implicit object argument.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Match the corresponding parameter on the callee, looked up through
      // either the constructor declaration or the called function.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Stops at the first parameter the inner matcher accepts.
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // Variadic ellipsis parameters are not counted by getNumParams.
  return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getReturnType(), Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Checks the written storage class, unlike isStaticLocal/hasStaticStorageDuration.
  return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  return Node.isDefaulted();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Functions without a prototype have no exception specification at all.
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
    return FnTy->hasDynamicExceptionSpec();
  return false;
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;
  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;
  return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  return Node.isConstexpr();
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // The init statement is optional in the grammar (C++17 and later).
  const Stmt *Init = Node.getInit();
  return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder);
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // A condition may be absent, e.g. in `for (;;)`.
  if (const Expr *Cond = Node.getCond())
    return InnerMatcher.matches(*Cond, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Then = Node.getThen();
  return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder));
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // getElse is null when the if statement has no else branch.
  const Stmt *const Else = Node.getElse();
  return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder));
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSepcifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // Implemented by pruning every binding set in which the node bound to ID
  // differs from the current node; the match succeeds if any set survives.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Present only when the if condition declares a variable.
  if (const DeclStmt *CondVar = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVar, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getIdx())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasBase(implicitCastExpr(
/// hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  // The base is the array/pointer operand of the subscript.
  if (const Expr* Expression = Node.getBase())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
WhileStmt,
CXXForRangeStmt,
FunctionDecl),
internal::Matcher<Stmt>, InnerMatcher) {
// GetBodyMatcher selects the right body accessor for each supported node
// type; the result may be null (e.g. a function declared but not defined).
const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
return (Statement != nullptr &&
InnerMatcher.matches(*Statement, Finder, Builder));
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
StmtExpr),
internal::Matcher<Stmt>, InnerMatcher) {
// CompoundStmtMatcher resolves the node to its CompoundStmt (the identity
// for CompoundStmt itself); the null check guards the StmtExpr path.
const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
CS->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
// The polymorphic matcher is specialized to a concrete literal node type at
// the use site; ValueEqualsMatcher performs the actual comparison.
return internal::PolymorphicMatcherWithParam1<
internal::ValueEqualsMatcher,
ValueT>(Value);
}
// Fixed-parameter-type overloads of equals() for bool, unsigned and double.
// NOTE(review): presumably these exist so the matcher can be constructed with
// a concrete parameter type (e.g. via the dynamic matcher registry /
// clang-query) — confirm against the registry code.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
IntegerLiteral),
bool, Value, 0) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
IntegerLiteral),
unsigned, Value, 1) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
FloatingLiteral,
IntegerLiteral),
double, Value, 2) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compare the spelled operator (e.g. "||", "!") against the requested name.
  const auto OpcodeName = Node.getOpcodeStr(Node.getOpcode());
  return Name == OpcodeName;
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
/// hasAnyOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcherWithParam1<
internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
StringRef, internal::hasAnyOperatorNameFunc>
hasAnyOperatorName;
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
CXXOperatorCallExpr)) {
return Node.isAssignmentOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *LHS = Node.getLHS())
    return InnerMatcher.matches(*LHS, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *RHS = Node.getRHS())
    return InnerMatcher.matches(*RHS, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
const internal::Matcher<Expr> &InnerMatcher) {
// Convenience wrapper: exactly anyOf(hasLHS(m), hasRHS(m)).
return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *SubExpr = Node.getSubExpr())
    return InnerMatcher.matches(*SubExpr, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
OpaqueValueExpr),
internal::Matcher<Expr>, InnerMatcher) {
// GetSourceExpressionMatcher dispatches to the appropriate source-expression
// accessor for CastExpr vs. OpaqueValueExpr.
const Expr *const SubExpression =
internal::GetSourceExpressionMatcher<NodeType>::get(Node);
return (SubExpression != nullptr &&
InnerMatcher.matches(*SubExpression, Finder, Builder));
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, the CastKind parameter
/// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Match against the type as spelled in the cast, not the semantic type.
  return InnerMatcher.matches(Node.getTypeAsWritten(), Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
internal::Matcher<QualType>, InnerMatcher) {
// For an implicit cast, the destination type is the expression's own type.
return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
return Node.isStruct(); // Delegates to TagDecl::isStruct().
}
/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
return Node.isUnion(); // Delegates to TagDecl::isUnion().
}
/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
return Node.isClass(); // Delegates to TagDecl::isClass().
}
/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
return Node.isEnum(); // Delegates to TagDecl::isEnum().
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
ObjCMethodDecl,
FunctionDecl)) {
// All four supported decl types provide isThisDeclarationADefinition().
return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
return Node.isVariadic(); // Delegates to FunctionDecl::isVariadic().
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
///     (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
///         ofClass(hasName("A"))))))
/// \code
///   class A {
///    public:
///     A();
///   };
///   A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  // The method's parent is the class it is declared in.
  if (const CXXRecordDecl *ParentClass = Node.getParent())
    return InnerMatcher.matches(*ParentClass, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
internal::Matcher<CXXMethodDecl>, InnerMatcher) {
// Accumulates one set of bindings per matching overridden method.
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto *Overridden : Node.overridden_methods()) {
// Match against a copy of the current bindings so a failed attempt does not
// pollute the caller's builder.
BoundNodesTreeBuilder OverriddenBuilder(*Builder);
const bool OverriddenMatched =
InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
if (OverriddenMatched) {
Matched = true;
Result.addMatch(OverriddenBuilder);
}
}
// Replace the caller's bindings with the accumulated per-override matches.
*Builder = std::move(Result);
return Matched;
}
/// Matches if the given method declaration is virtual.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isVirtual) {
return Node.isVirtual(); // True also for implicitly virtual overrides.
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
// Only methods spelled with the 'virtual' keyword, per the example above.
return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
CXXMethodDecl)) {
// Finality is recorded as a FinalAttr on both records and methods.
return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
return Node.isPure(); // Pure virtual, i.e. declared with '= 0'.
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
return Node.isConst(); // Delegates to CXXMethodDecl::isConst().
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
// Matches methods that actually override something, or that merely carry an
// 'override' attribute.
return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
// Per the example: defaulted (#2) and deleted (#3) methods do not count.
return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr)) {
return Node.isArrow(); // All three supported node types expose isArrow().
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
// These matchers all delegate to the underlying Type's classification
// predicates via QualType's operator->.
return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
// Covers both ordinary pointers and Objective-C object pointers.
return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
return Node.isConstQualified(); // Top-level const only, per the doc above.
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
// Unlike the two matchers above, this ignores qualifiers that come in
// through a typedef (see the \c const_int example).
return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
internal::Matcher<ValueDecl>, InnerMatcher) {
// Matches against the declaration the member expression refers to.
return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
/// cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
hasObjectExpression,
AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr),
internal::Matcher<Expr>, InnerMatcher) {
// NOTE(review): for these two node kinds an implicit access appears to have
// no usable base expression, hence the early 'false' — confirm that
// getBase() is invalid in the implicit case.
if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
internal::Matcher<UsingShadowDecl>, InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
internal::Matcher<NamedDecl>, InnerMatcher) {
// The target is the declaration the using-declaration brings into scope.
return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
CXXRecordDecl)) {
// Implicit instantiations plus explicit instantiation definitions and
// declarations all count; explicit specializations do not.
return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation ||
Node.getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDefinition ||
Node.getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDeclaration);
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
// A decl matches if it is itself an instantiation or has one as an ancestor.
auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
functionDecl(isTemplateInstantiation())));
return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
// A statement is "in" an instantiation if any ancestor decl is one.
return stmt(
hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
functionDecl(isTemplateInstantiation())))));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
CXXRecordDecl)) {
// Only full explicit specializations; instantiations are handled by
// isTemplateInstantiation() above.
return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
internal::Matcher<QualType>, InnerMatcher, 0) {
// Adapts a QualType matcher so it can be applied to a TypeLoc node.
return internal::BindableMatcher<TypeLoc>(
new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
return Node.isBooleanType(); // Delegates to Type::isBooleanType().
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
return Node.isVoidType(); // Delegates to Type::isVoidType().
}
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
// "Real" excludes _Complex floating types (see complexType() above).
return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
StringLiteral),
unsigned, N) {
// HasSizeMatcher dispatches to the node-specific size accessor (array
// element count vs. string length).
return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 }
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
internal::Matcher<Expr>, InnerMatcher) {
// NOTE(review): assumes getSizeExpr() is non-null for every
// VariableArrayType — confirm before relying on it.
return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; }
/// A::* ptr = A::i;
/// \endcode
/// memberPointerType()
/// matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// objcObjectPointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // Without a qualifier there is nothing to match against.
  const NestedNameSpecifier *Qualifier = Node.getQualifier();
  if (!Qualifier)
    return false;
  return InnerMatcher.matches(*Qualifier, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match the type the elaborated type ultimately names.
  const QualType Named = Node.getNamedType();
  return InnerMatcher.matches(Named, Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher.
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
InnerType) {
return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
const DeclContext *DC = Node.getDeclContext();
if (!DC) return false;
// Every DeclContext is also a Decl; view it as one for matching.
return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Wrap the NestedNameSpecifier matcher in a LocMatcher so it can be
  // applied to NestedNameSpecifierLoc nodes.
  auto *Impl =
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher);
  return internal::BindableMatcher<NestedNameSpecifierLoc>(Impl);
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // A specifier that does not name a type can never match.
  const Type *T = Node.getAsType();
  if (!T)
    return false;
  // Match against the unqualified form of the named type.
  return InnerMatcher.matches(QualType(T, 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // An invalid loc cannot match.
  if (!Node)
    return false;
  // Neither can a specifier that does not name a type.
  if (!Node.getNestedNameSpecifier()->getAsType())
    return false;
  return InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // Match only when a prefix exists and the inner matcher accepts it.
  if (const NestedNameSpecifier *Prefix = Node.getPrefix())
    return InnerMatcher.matches(*Prefix, Finder, Builder);
  return false;
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // Match only when the loc has a valid prefix.
  NestedNameSpecifierLoc Prefix = Node.getPrefix();
  return Prefix && InnerMatcher.matches(Prefix, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Only specifiers that name a namespace can match.
  const NamespaceDecl *NS = Node.getAsNamespace();
  if (!NS)
    return false;
  return InnerMatcher.matches(*NS, Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
// Pointer comparison suffices: \c Decl has pointer identity.
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
// Pointer comparison suffices: \c Stmt has pointer identity.
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
// Pointer comparison suffices: \c Type has pointer identity.
return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
InnerMatcher) {
// Accumulates the binding sets of every case that matched.
BoundNodesTreeBuilder Result;
// FIXME: getSwitchCaseList() does not necessarily guarantee a stable
// iteration order. We should use the more general iterating matchers once
// they are capable of expressing this matcher (for example, it should ignore
// case statements belonging to nested switch statements).
bool Matched = false;
for (const SwitchCase *SC = Node.getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase()) {
// Each case gets its own copy of the current bindings so matches
// do not contaminate each other.
BoundNodesTreeBuilder CaseBuilder(*Builder);
bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder);
if (CaseMatched) {
Matched = true;
Result.addMatch(CaseBuilder);
}
}
// Replace the caller's bindings with the accumulated per-case matches.
*Builder = std::move(Result);
return Matched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
// Accumulates the binding sets of every initializer that matched.
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto *I : Node.inits()) {
// Each initializer matches against an isolated copy of the bindings.
BoundNodesTreeBuilder InitBuilder(*Builder);
if (InnerMatcher.matches(*I, Finder, &InitBuilder)) {
Matched = true;
Result.addMatch(InitBuilder);
}
}
// Replace the caller's bindings with the accumulated per-init matches.
*Builder = std::move(Result);
return Matched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
// Defer to the AST node's own classification.
return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
// Defer to the AST node's own classification.
return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
// Defer to the AST node's own classification.
return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
// Defer to the AST node's own classification.
return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool); // # 7
/// explicit(true) S(char); // # 8
/// explicit(b) S(S); // # 9
/// };
/// S(int) -> S<true>; // #5
/// explicit S(double) -> S<false>; // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXConstructorDecl, CXXConversionDecl,
CXXDeductionGuideDecl)) {
// All three supported node types expose the same isExplicit() query.
return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
///   S(int); // #1
///   explicit S(double); // #2
///   operator int(); // #3
///   explicit operator bool(); // #4
///   explicit(false) S(bool); // # 7
///   explicit(true) S(char); // # 8
///   explicit(b) S(S); // # 9
/// };
/// S(int) -> S<true>; // #5
/// explicit S(double) -> S<false>; // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8
/// and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3
/// or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match
/// #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only an explicit(expr) specifier carries an expression to match.
  ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
  const Expr *ExplicitExpr = ES.getExpr();
  return ExplicitExpr && InnerMatcher.matches(*ExplicitExpr, Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // The query spelling differs between the supported polymorphic types,
  // so dispatch on the dynamic node type here. The two types are disjoint,
  // so the order of the checks does not matter.
  if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
    return NSD->isInline();
  if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
    return FD->isInlineSpecified();
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
// Defer to the AST node's own classification.
return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A GNU case range (case lo ... hi:) has an RHS; those never match.
  return !Node.getRHS() &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // True if any attribute attached to the declaration has the given kind.
  return llvm::any_of(Node.attrs(), [AttrKind](const Attr *A) {
    return A->getKind() == AttrKind;
  });
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare `return;` has no value and never matches.
  const Expr *RetValue = Node.getRetValue();
  if (!RetValue)
    return false;
  return InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
// NPC_ValueDependentIsNull: value-dependent expressions count as null.
return Node.isNullPointerConstant(Finder->getASTContext(),
Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
InnerMatcher) {
// Walk up the parent chain starting from the statement's immediate parents.
const auto &Parents = Finder->getASTContext().getParents(Node);
llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
while(!Stack.empty()) {
const auto &CurNode = Stack.back();
Stack.pop_back();
if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
// Reached an enclosing function declaration: test it.
if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
return true;
}
} else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
// A statement inside a lambda body belongs to the lambda's call
// operator, not to the lexically enclosing function.
if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
Finder, Builder)) {
return true;
}
} else {
// Neither a function nor a lambda: continue walking upwards.
for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
Stack.push_back(Parent);
}
}
return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
// True for parameters declared with "= value".
return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
// True for the array form of new, e.g. new T[n].
return Node.isArray();
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices never match.
  if (Index >= Node.getNumPlacementArgs())
    return false;
  return InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
InnerMatcher) {
return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) {
return InnerMatcher.matches(*Arg, Finder, Builder);
});
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
// getArraySize() returns an optional-like wrapper; require a non-null
// size expression before dereferencing it for the inner match.
return Node.isArray() && *Node.getArraySize() &&
InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
// False for forward declarations with no definition anywhere.
return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
// True for enum class / enum struct declarations.
return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped function types can carry a trailing return type.
  const auto *Proto = Node.getType()->getAs<FunctionProtoType>();
  return Proto && Proto->hasTrailingReturn();
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
// E tracks the node that we are examining.
const Expr *E = &Node;
// If present, remove an outer `ExprWithCleanups` corresponding to the
// underlying `CXXConstructExpr`. This check won't cover all cases of added
// `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
// EWC is placed on the outermost node of the expression, which this may not
// be), but, it still improves the coverage of this matcher.
if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
E = CleanupsExpr->getSubExpr();
if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
if (CtorExpr->isElidable()) {
// An elidable copy wraps its source in a MaterializeTemporaryExpr;
// match against the underlying expression instead.
if (const auto *MaterializeTemp =
dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
Builder);
}
}
}
// Not an elidable construction: match the node as-is.
return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
// Defer to the AST node's own classification.
return Node.isStandaloneDirective();
}
/// Matches the Stmt AST node that is marked as being the structured-block
/// of an OpenMP executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``stmt(isOMPStructuredBlock()))`` matches ``{}``.
AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); }
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // A standalone directive carries no structured block, so it can never
  // match; otherwise defer to the inner matcher on the block itself.
  return !Node.isStandaloneDirective() &&
         InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  // Succeed if at least one clause of this directive satisfies InnerMatcher.
  const ArrayRef<OMPClause *> AllClauses = Node.clauses();
  return matchesFirstInPointerRange(InnerMatcher, AllClauses.begin(),
                                    AllClauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
// True iff the clause was written as ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
// True iff the clause was written as ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
// Delegates to isAllowedClauseForDirective(); the check also depends on the
// OpenMP version taken from the current language options.
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
OpenMPClauseKind, CKind) {
return isAllowedClauseForDirective(
Node.getDirectiveKind(), CKind,
Finder->getASTContext().getLangOpts().OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
DRB071-targetparallelfor-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
use of omp target: len is not mapped. It should be firstprivate within target.
*/
/* DataRaceBench kernel (Cetus-transformed): two independent parallel loops
   initialize and then increment a[], followed by a serial loop that prints
   the result.  The cetus/omp pragmas are the object under test; do not
   alter them.
   NOTE(review): printf is used but no #include <stdio.h> is visible in this
   chunk -- confirm the include exists elsewhere in the file. */
int main(int argc, char * argv[])
{
int i;
int len = 1000;
/* Variable-length array of 1000 ints on the stack. */
int a[len];
int _ret_val_0;
/* Loop 1: each iteration writes a distinct a[i]; parallelization is safe. */
#pragma cetus private(i)
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for private(i)
for (i=0; i<len; i ++ )
{
a[i]=i;
}
/* Loop 2: reads and writes the same a[i] per iteration; still race-free. */
#pragma cetus private(i)
#pragma loop name main#1
#pragma cetus parallel
#pragma omp parallel for private(i)
for (i=0; i<len; i ++ )
{
a[i]=(a[i]+1);
}
/* Loop 3: serial output of the final array contents. */
#pragma cetus private(i)
#pragma loop name main#2
for (i=0; i<len; i ++ )
{
printf("%d", a[i]);
}
_ret_val_0=0;
return _ret_val_0;
}
|
GB_reduce_panel.c | //------------------------------------------------------------------------------
// GB_reduce_panel: s=reduce(A), reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Reduce a matrix to a scalar using a panel-based method for built-in
// operators. No typecasting is performed.
{
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ;
int64_t anz = GB_NNZ (A) ;
ASSERT (anz > 0) ;
#if GB_IS_ANY_MONOID
// the ANY monoid can take any entry, and terminate immediately
s = Ax [anz-1] ;
#else
//--------------------------------------------------------------------------
// typecast workspace
//--------------------------------------------------------------------------
// ctype W [ntasks] ;
// NOTE(review): W_space is supplied by the including file -- presumably
// sized for at least ntasks entries of GB_CTYPE; confirm at the caller.
GB_CTYPE *GB_RESTRICT W = (GB_CTYPE *) W_space ;
//--------------------------------------------------------------------------
// reduce A to a scalar
//--------------------------------------------------------------------------
if (nthreads == 1)
{
//----------------------------------------------------------------------
// load the Panel with the first entries
//----------------------------------------------------------------------
// The Panel holds GB_PANEL independent partial accumulators; keeping
// several accumulators live lets the compiler vectorize the reduction.
GB_ATYPE Panel [GB_PANEL] ;
int64_t first_panel_size = GB_IMIN (GB_PANEL, anz) ;
for (int64_t k = 0 ; k < first_panel_size ; k++)
{
Panel [k] = Ax [k] ;
}
#if GB_HAS_TERMINAL
int panel_count = 0 ;
#endif
//----------------------------------------------------------------------
// reduce all entries to the Panel
//----------------------------------------------------------------------
for (int64_t p = GB_PANEL ; p < anz ; p += GB_PANEL)
{
if (p + GB_PANEL > anz)
{
// last partial panel
for (int64_t k = 0 ; k < anz-p ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
}
else
{
// full panel
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
#if GB_HAS_TERMINAL
panel_count-- ;
if (panel_count <= 0)
{
// check for early exit only every 256 panels
panel_count = 256 ;
int count = 0 ;
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
count += (Panel [k] == GB_TERMINAL_VALUE) ;
}
if (count > 0)
{
break ;
}
}
#endif
}
}
//----------------------------------------------------------------------
// s = reduce (Panel)
//----------------------------------------------------------------------
// Fold the partial accumulators into the single scalar result.
s = Panel [0] ;
for (int64_t k = 1 ; k < first_panel_size ; k++)
{
// s = op (s, Panel [k]) ;
GB_ADD_ARRAY_TO_SCALAR (s, Panel, k) ;
}
}
else
{
//----------------------------------------------------------------------
// all tasks share a single early_exit flag
//----------------------------------------------------------------------
// If this flag gets set, all tasks can terminate early
#if GB_HAS_TERMINAL
bool early_exit = false ;
#endif
//----------------------------------------------------------------------
// each thread reduces its own slice in parallel
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the work for this task
//------------------------------------------------------------------
// Task tid reduces Ax [pstart:pend-1] to the scalar W [tid]
int64_t pstart, pend ;
GB_PARTITION (pstart, pend, anz, tid, ntasks) ;
// t starts as the first entry of the slice, so W [tid] is always
// well-defined even when the task exits early below.
GB_ATYPE t = Ax [pstart] ;
//------------------------------------------------------------------
// skip this task if the terminal value has already been reached
//------------------------------------------------------------------
#if GB_HAS_TERMINAL
// check if another task has called for an early exit
bool my_exit ;
GB_ATOMIC_READ
my_exit = early_exit ;
if (!my_exit)
#endif
//------------------------------------------------------------------
// do the reductions for this task
//------------------------------------------------------------------
{
//--------------------------------------------------------------
// load the Panel with the first entries
//--------------------------------------------------------------
GB_ATYPE Panel [GB_PANEL] ;
int64_t my_anz = pend - pstart ;
int64_t first_panel_size = GB_IMIN (GB_PANEL, my_anz) ;
for (int64_t k = 0 ; k < first_panel_size ; k++)
{
Panel [k] = Ax [pstart + k] ;
}
#if GB_HAS_TERMINAL
int panel_count = 0 ;
#endif
//--------------------------------------------------------------
// reduce all entries to the Panel
//--------------------------------------------------------------
for (int64_t p = pstart + GB_PANEL ; p < pend ; p += GB_PANEL)
{
if (p + GB_PANEL > pend)
{
// last partial panel
for (int64_t k = 0 ; k < pend-p ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
}
else
{
// full panel
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
#if GB_HAS_TERMINAL
panel_count-- ;
if (panel_count <= 0)
{
// check for early exit only every 256 panels
panel_count = 256 ;
int count = 0 ;
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
count += (Panel [k] == GB_TERMINAL_VALUE) ;
}
if (count > 0)
{
break ;
}
}
#endif
}
}
//--------------------------------------------------------------
// t = reduce (Panel)
//--------------------------------------------------------------
t = Panel [0] ;
for (int64_t k = 1 ; k < first_panel_size ; k++)
{
// t = op (t, Panel [k]) ;
GB_ADD_ARRAY_TO_SCALAR (t, Panel, k) ;
}
#if GB_HAS_TERMINAL
if (t == GB_TERMINAL_VALUE)
{
// tell all other tasks to exit early
GB_ATOMIC_WRITE
early_exit = true ;
}
#endif
}
//------------------------------------------------------------------
// save the results of this task
//------------------------------------------------------------------
W [tid] = t ;
}
//----------------------------------------------------------------------
// sum up the results of each slice using a single thread
//----------------------------------------------------------------------
s = W [0] ;
for (int tid = 1 ; tid < ntasks ; tid++)
{
// s = op (s, W [tid]), no typecast
GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ;
}
}
#endif
}
|
gbdt.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
#include <LightGBM/json11.hpp>
#include "score_updater.hpp"
using namespace json11;
namespace LightGBM {
/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Merge model from other boosting object. Will insert to the front of current boosting object
* \param other
*/
void MergeFrom(const Boosting* other) override {
// NOTE(review): reinterpret_cast assumes *other really is a GBDT;
// static_cast would express the intended downcast -- confirm call sites.
auto other_gbdt = reinterpret_cast<const GBDT*>(other);
// tmp move to other vector
auto original_models = std::move(models_);
models_ = std::vector<std::unique_ptr<Tree>>();
// push model from other first
for (const auto& tree : other_gbdt->models_) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// push model in current object
for (const auto& tree : original_models) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
// Shuffle whole iterations (groups of num_tree_per_iteration_ trees) within
// [start_iter, end_iter) using a fixed-seed RNG, then rebuild models_ by
// deep-copying trees in the shuffled order.
void ShuffleModels(int start_iter, int end_iter) override {
int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iter = std::max(0, start_iter);
if (end_iter <= 0) {
end_iter = total_iter;
}
end_iter = std::min(total_iter, end_iter);
auto original_models = std::move(models_);
std::vector<int> indices(total_iter);
for (int i = 0; i < total_iter; ++i) {
indices[i] = i;
}
Random tmp_rand(17);
for (int i = start_iter; i < end_iter - 1; ++i) {
int j = tmp_rand.NextShort(i + 1, end_iter);
std::swap(indices[i], indices[j]);
}
models_ = std::vector<std::unique_ptr<Tree>>();
for (int i = 0; i < total_iter; ++i) {
for (int j = 0; j < num_tree_per_iteration_; ++j) {
int tree_idx = indices[i] * num_tree_per_iteration_ + j;
auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
models_.push_back(std::move(new_tree));
}
}
}
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequency of snapshot
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
virtual bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
if (objective_function_ == nullptr) {
return true;
} else {
return objective_function_->NeedAccuratePrediction();
}
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
virtual const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
virtual int64_t GetNumPredictAt(int data_idx) const override {
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
num_data = valid_score_updater_[data_idx - 1]->num_data();
}
return num_data * num_class_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param result used to store prediction result, should allocate memory before call this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
* \brief Get number of prediction for one data
* \param num_iteration number of used iterations
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction
*/
inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
int num_preb_in_one_row = num_class_;
if (is_pred_leaf) {
int max_iteration = GetCurrentIteration();
if (num_iteration > 0) {
num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration));
} else {
num_preb_in_one_row *= max_iteration;
}
} else if (is_pred_contrib) {
num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline
}
return num_preb_in_one_row;
}
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
virtual bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \return Non-empty string if succeeded
*/
virtual std::string SaveModelToString(int start_iteration, int num_iterations) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of tree per iteration
* \return number of tree per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
// Set how many iterations to use for prediction; when predicting feature
// contributions, the per-tree max depth is recomputed first (in parallel).
inline void InitPredict(int num_iteration, bool is_pred_contrib) override {
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
if (num_iteration > 0) {
num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_);
}
if (is_pred_contrib) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
models_[i]->RecomputeMaxDepth();
}
}
}
// Read the output value of one leaf of one tree (bounds-checked).
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
return models_[tree_idx]->LeafOutput(leaf_idx);
}
// Overwrite the output value of one leaf of one tree (bounds-checked).
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
/*!
* \brief Get Type name of this boosting object
*/
virtual const char* SubModelName() const override { return "tree"; }
protected:
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current iteration
*/
virtual void Bagging(int iter);
/*!
* \brief Helper function for bagging, used for multi-threading optimization
* \param start start index of bagging
* \param cnt count
* \param buffer output buffer
* \return count of left size
*/
data_size_t BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
/*!
* \brief Helper function for bagging, used for multi-threading optimization, balanced sampling
* \param start start index of bagging
* \param cnt count
* \param buffer output buffer
* \return count of left size
*/
data_size_t BalancedBaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
/*!
* \brief calculate the object function
*/
virtual void Boosting();
/*!
* \brief updating score after tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current iteration
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Only use first metric for early stopping */
bool es_first_metric_only_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*! \brief Max feature index of training data*/
int max_feature_idx_;
/*! \brief First order derivative of training data */
std::vector<score_t> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t> hessians_;
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t> tmp_indices_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iterations */
int num_tree_per_iteration_;
/*! \brief Number of class */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of used model */
int num_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
/*! \brief number of threads */
int num_threads_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> offsets_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> left_cnts_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> right_cnts_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> left_write_pos_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> right_write_pos_buf_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
bool balanced_bagging_;
std::string loaded_parameter_;
std::vector<int8_t> monotone_constraints_;
Json forced_splits_json_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
axpy_int.c | //axpy.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 1200
// read timer in second
/* Return the current wall-clock time in seconds (millisecond resolution). */
double read_timer() {
    struct timeb now;
    ftime(&now);
    return (double) now.time + ((double) now.millitm) / 1000.0;
}
//Create a matrix and a vector and fill with random numbers
/* Fill both N-element vectors with small pseudo-random integers
   (rand() divided into ~10 buckets, matching the original scaling). */
void init(int *X, int *Y) {
    const int bucket = (int)(RAND_MAX/10.0);
    for (int k = 0; k < N; ++k) {
        X[k] = (int)rand() / bucket;
        Y[k] = (int)rand() / bucket;
    }
}
//Our sum function- what it does is pretty straight-forward.
/* SIMD-hinted AXPY over the fixed problem size: Y[k] = a*X[k] + Y[k]. */
void axpy(int *X, int *Y, int a) {
#pragma omp simd
    for (int k = 0; k < N; ++k)
        Y[k] = Y[k] + a * X[k];
}
// Debug functions
/* Reference scalar AXPY (no SIMD pragma), used to validate axpy(). */
void axpy_serial(int *X, int *Y, int a) {
    for (int k = 0; k < N; ++k)
        Y[k] += X[k] * a;
}
/* Print the first 8 entries of a vector as "[v0 v1 ... v7 ]\n". */
void print_vector(int *vector) {
    printf("[");
    for (int k = 0; k < 8; ++k)
        printf("%d ", vector[k]);
    puts("]");
}
/*
 * Compare two N-element vectors and return a mismatch score (0 == identical).
 *
 * Bug fix: the original summed the *signed* differences A[i]-B[i], so
 * opposite-sign errors (e.g. +5 and -5) cancelled and the check could
 * report 0 for vectors that actually differ.  Summing absolute differences
 * preserves the "0 means correct" contract while making every mismatch
 * contribute to the score.
 */
int check(int *A, int *B){
    int difference = 0;
    for (int i = 0; i < N; i++) {
        int d = A[i] - B[i];
        difference += (d < 0) ? -d : d;
    }
    return difference;
}
/*
 * Driver: time SIMD vs serial AXPY over N_RUNS repetitions and compare the
 * two result vectors.
 *
 * Fixes relative to the original:
 *  - malloc results are checked before use.
 *  - The GFLOPS figure used (2.0*N)*N*N_RUNS, but one AXPY pass performs
 *    only 2*N flops (one multiply and one add per element), so the reported
 *    rate was inflated by a factor of N.  It now uses 2.0*N*N_RUNS.
 */
int main(int argc, char **argv) {
    // Set everything up
    int *X = malloc(sizeof(int)*N);
    int *Y = malloc(sizeof(int)*N);
    int *Y_serial = malloc(sizeof(int)*N);
    if (X == NULL || Y == NULL || Y_serial == NULL) {
        fprintf(stderr, "allocation failure\n");
        free(X);
        free(Y);
        free(Y_serial);
        return 1;
    }
    int a = 3;
    srand(time(NULL));
    init(X, Y);
    /* Keep an identical copy of Y so the serial run starts from the same data. */
    for (int i = 0; i<N; i++) Y_serial[i] = Y[i];
    print_vector(Y);
    print_vector(X);
    printf("%d\n", a);
    puts("=\n");
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy(X, Y, a);
    double t = (read_timer() - start);
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy_serial(X, Y_serial, a);
    double t_serial = (read_timer() - start_serial);
    print_vector(Y);
    puts("---------------------------------");
    print_vector(Y_serial);
    /* AXPY flop count: one multiply + one add per element per run. */
    double gflops = (2.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (2.0 * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("AXPY (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("AXPY (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %d\n", check(Y,Y_serial));
    free(X);
    free(Y);
    free(Y_serial);
    return 0;
}
|
selection_move_generator.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_SELECTION_MOVE_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_SELECTION_MOVE_MOVE_GENERATOR_H__
#include "abstract_move_generator.h"
namespace printemps {
namespace neighborhood {
/*****************************************************************************/
/**
 * Neighborhood-move generator for "selection" constraints (a set of binary
 * variables of which exactly one is 1). Each generated move deselects the
 * variable currently set to 1 in the candidate's selection and sets the
 * candidate variable to 1 instead.
 */
template <class T_Variable, class T_Expression>
class SelectionMoveGenerator
: public AbstractMoveGenerator<T_Variable, T_Expression> {
private:
public:
/*************************************************************************/
SelectionMoveGenerator(void) {
/// nothing to do
}
/*************************************************************************/
virtual ~SelectionMoveGenerator(void) {
/// nothing to do
}
/*************************************************************************/
/**
 * Build one move object (and one flag slot) per candidate variable and
 * install the updater lambda that refreshes all moves before each use.
 * a_VARIABLE_PTRS: binary variables belonging to selection constraints;
 * each is expected to have a valid selection_ptr().
 */
constexpr void setup(
std::vector<model_component::Variable<T_Variable, T_Expression> *>
&a_VARIABLE_PTRS) {
/**
* "Swap" move for binary variables in selection
* constraints: e.g.) selection constraint x + y + z = 1 (x,
* y, z \in {0, 1}) move: {(x = 0, y = 1), (x = 0, z = 1)}
* (if x = 1, y = 0, z = 0)
*/
/**
* Setup move objects. The static fields of each move (sense, related
* constraints, classification flags) are fixed here; the alterations
* are rebuilt on every call of the updater below.
*/
const int VARIABLES_SIZE = a_VARIABLE_PTRS.size();
this->m_moves.resize(VARIABLES_SIZE);
this->m_flags.resize(VARIABLES_SIZE);
for (auto i = 0; i < VARIABLES_SIZE; i++) {
auto &move = this->m_moves[i];
move.sense = MoveSense::Selection;
move.related_constraint_ptrs =
a_VARIABLE_PTRS[i]->selection_ptr()->related_constraint_ptrs;
move.is_univariable_move = false;
move.is_selection_move = true;
move.is_special_neighborhood_move = false;
move.is_available = true;
move.overlap_rate = 0.0;
}
/**
* Setup move updater
*/
auto move_updater = //
[this, a_VARIABLE_PTRS, VARIABLES_SIZE](
auto * a_moves_ptr, //
auto * a_flags, //
const bool a_ACCEPT_ALL, //
const bool a_ACCEPT_OBJECTIVE_IMPROVABLE, //
const bool a_ACCEPT_FEASIBILITY_IMPROVABLE, //
[[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
/**
* Move i: set the currently selected variable of i's selection to 0
* and variable i itself to 1. Rebuilt each call because the selected
* variable changes as the search proceeds.
*/
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
for (auto i = 0; i < VARIABLES_SIZE; i++) {
(*a_moves_ptr)[i].alterations.clear();
(*a_moves_ptr)[i].alterations.emplace_back(
a_VARIABLE_PTRS[i]
->selection_ptr()
->selected_variable_ptr,
0);
(*a_moves_ptr)[i].alterations.emplace_back(
a_VARIABLE_PTRS[i], 1);
}
const int MOVES_SIZE = a_moves_ptr->size();
/**
* Flag filter: flag 1 keeps a move, flag 0 drops it. A move is dropped
* if it touches a fixed variable, if it would "swap" a variable with
* itself (the candidate is already the selected one), or if the
* objective/feasibility-improvability filters reject it.
*/
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
for (auto i = 0; i < MOVES_SIZE; i++) {
(*a_flags)[i] = 1;
if (neighborhood::has_fixed_variable((*a_moves_ptr)[i])) {
(*a_flags)[i] = 0;
continue;
}
if ((*a_moves_ptr)[i].alterations[0].first ==
(*a_moves_ptr)[i].alterations[1].first) {
(*a_flags)[i] = 0;
continue;
}
if (a_ACCEPT_ALL) {
/** nothing to do */
} else {
if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
neighborhood::has_objective_improvable_variable(
(*a_moves_ptr)[i])) {
continue;
}
if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
neighborhood::has_feasibility_improvable_variable(
(*a_moves_ptr)[i])) {
continue;
}
(*a_flags)[i] = 0;
}
}
};
this->m_move_updater = move_updater;
}
};
} // namespace neighborhood
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
DRACC_OMP_036_SAXPY_without_Task_Dependency_yes.c | /*
SAXPY without dependencies between the addition and multiplication task.
*/
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#define C 20000
float a;
float x[C];
float y[C];
// init: reset the global SAXPY operands: a = 5, x[i] = 0, y[i] = 3.
// (The original re-assigned a inside the loop; the final state is the same.)
int init(){
    a = 5;
    for (int idx = 0; idx < C; idx++) {
        x[idx] = 0;
        y[idx] = 3;
    }
    return 0;
}
// saxpy: offloaded SAXPY-style kernel used as a data-race benchmark.
// The file name suffix "_yes" marks this as the *defective* variant: the
// multiply task (even i) and the add task (odd i) are created with no
// depend clauses, so nothing orders them — this race is intentional and
// is what a race detector is expected to report.
//
// NOTE(review): the loop runs i up to 2*C-1 while x and y have only C
// elements, so x[i]/y[i] appear to index out of bounds for i >= C —
// presumably x[i/2] was intended; confirm against the upstream DRACC
// benchmark before changing anything here.
int saxpy(){
#pragma omp target map(to:y[0:C],a) map(tofrom:x[0:C]) device(0)
{
#pragma omp teams distribute
for(int i=0; i<2*C; i++){
if(i%2==0){
#pragma omp task
{
x[i] = a * x[i];
}
}else
{
#pragma omp task
{
x[i] = x[i] + y[i];
}
}
}
}
return 0;
}
// check: report whether any x[i] differs from the expected value 3,
// i.e. whether the racy kernel produced a visibly wrong result.
int check(){
    bool issue_seen = false;
    for (int idx = 0; idx < C; idx++) {
        if (x[idx] != 3) {
            issue_seen = true;
        }
    }
    printf("Memory Access Issue visible: %s\n", issue_seen ? "true" : "false");
    return 0;
}
// Entry point: initialize the operands, run the (intentionally racy)
// offloaded kernel, then report whether the race corrupted the result.
int main(void){
    init();
    saxpy();
    check();
    return 0;
}
GB_binop__isne_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int32)
// A*D function (colscale): GB (_AxD__isne_int32)
// D*A function (rowscale): GB (_DxB__isne_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int32)
// C=scalar+B GB (_bind1st__isne_int32)
// C=scalar+B' GB (_bind1st_tran__isne_int32)
// C=A+scalar GB (_bind2nd__isne_int32)
// C=A'+scalar GB (_bind2nd_tran__isne_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT32 || GxB_NO_ISNE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISNE is none of these, so this dense C += A+B kernel is compiled out
// and the code generator emitted the placeholder name "(none)".
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with C, A, B all dense. The numeric work lives in the shared
// template, specialized here by the GB_* macros above for
// cij = (aij != bij) on int32. Returns GrB_NO_VALUE when this operator
// is disabled at compile time (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__isne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// precomputed slicing of B's entries across B_ntasks/B_nthreads.
// All numeric work is in the included template, specialized for ISNE int32.
GrB_Info GB (_Cdense_accumB__isne_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C. The inner braces return before the trailing
// "return (GrB_SUCCESS)", which is unreachable generator boilerplate —
// harmless, and this file is auto-generated, so it is left as-is.
GrB_Info GB (_Cdense_accumb__isne_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// int32 results into C->x via the colscale template. The *_is_pattern
// flags and the slicing of A are consumed inside the template.
GrB_Info GB (_AxD__isne_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, the row-wise
// counterpart of _AxD above; numeric work is in the rowscale template.
GrB_Info GB (_DxB__isne_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the ISNE int32 operator applied to
// entries present in both A and B. Declares the per-matrix slicing
// workspaces consumed (and freed via GB_FREE_WORK) by the add template;
// the task list and mask arguments are interpreted inside the template.
GrB_Info GB (_AaddB__isne_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B for the general
// sparse/sparse case; dispatches entirely into the emult_01 meta template,
// specialized for cij = (aij != bij) on int32.
GrB_Info GB (_AemultB_01__isne_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for ISNE (defined above), so only the
// non-flipped branch of the #if below is compiled in; the flipxy argument
// is then irrelevant to the instantiated code.
GrB_Info GB (_AemultB_02__isne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; iterates over M's entries via the provided
// slicing, with numeric work in the emult_03 template.
GrB_Info GB (_AemultB_03__isne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap; the mask
// variant is selected inside the bitmap emult template via ewise_method
// and the Mask_* flags.
GrB_Info GB (_AemultB_bitmap__isne_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = (x != Bx [p]) for every entry present in B.
// Entries absent from a bitmap B (GBB(Bb,p) false) are skipped; for a
// full matrix GBB is always true. Cx and Bx may alias (in-place apply).
GrB_Info GB (_bind1st__isne_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    const int32_t *Bx = (const int32_t *) Bx_input ;
    const int32_t x = (*((const int32_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of B
        if (!GBB (Bb, p)) continue ;
        Cx [p] = (x != Bx [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = (Ax [p] != y) for every entry present in A.
// Entries absent from a bitmap A (GBB(Ab,p) false) are skipped; for a
// full matrix GBB is always true. Cx and Ax may alias (in-place apply).
GrB_Info GB (_bind2nd__isne_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    const int32_t *Ax = (const int32_t *) Ax_input ;
    const int32_t y = (*((const int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of A
        if (!GBB (Ab, p)) continue ;
        Cx [p] = (Ax [p] != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x != aij) via GB_CAST_OP
// (defined just above). GB_ATYPE is temporarily redefined around the
// template because GB_unop_transpose.c reads it for A, which is the 2nd
// operand here; the trailing redefinition restores it (generator
// boilerplate — both expansions are int32_t for this operator).
GrB_Info GB (_bind1st_tran__isne_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij != y) via GB_CAST_OP
// (defined just above); the transpose machinery is in the included
// template, driven by Workspaces/A_slice.
GrB_Info GB (_bind2nd_tran__isne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
wave3d_perf_b.c |
#ifndef TAPENADE
#include <math.h>
#endif
#define Max(x,y) fmax(x,y)
#define Min(x,y) fmin(x,y)
#define Heaviside(x) ((x>=0)?1.0:0.0)
#define u(x,xx,xxx) u[x][xx][xxx]
#define u_b(x,xx,xxx) u_b[x][xx][xxx]
#define c(x,xx,xxx) c[x][xx][xxx]
#define u_1(x,xx,xxx) u_1[x][xx][xxx]
#define u_1_b(x,xx,xxx) u_1_b[x][xx][xxx]
#define u_2(x,xx,xxx) u_2[x][xx][xxx]
#define u_2_b(x,xx,xxx) u_2_b[x][xx][xxx]
/*
 * wave3d_perf_b: reverse-mode (adjoint) sweep of a 3-D 7-point
 * wave-equation stencil. Given the incoming adjoint u_b, it accumulates
 * the corresponding adjoint contributions into u_1_b and u_2_b. The
 * coefficients mirror a forward update of the apparent form
 *   u(i,j,k) = 2*u_1(i,j,k) - u_2(i,j,k)
 *              + D*( c*neighbors of u_1 - 6*c(i,j,k)*u_1(i,j,k) )
 * (center weight -6*D*c + 2.0, weight -1 on u_2, weight D*c on each of
 * the six face neighbors).
 *
 * NOTE(review): the "_b" naming and the exhaustive case split over the
 * boundary planes i,j,k in {0, 1, n-2, n-1} versus the interior 2..n-3
 * indicate machine-generated (Tapenade-style) adjoint code. The statement
 * order encodes the reverse sweep — do not reorder or merge these blocks
 * by hand. The OpenMP pragmas are commented out, presumably because the
 * accumulations into neighboring cells would race; confirm before
 * re-enabling them.
 */
void wave3d_perf_b(double*** u, double*** u_b, double*** c, double*** u_1, double*** u_1_b, double*** u_2, double*** u_2_b, double D, int n) {
int i;
int j;
int k;
/* i == 0 plane: u_1 here only feeds the forward update of u(1,j,k) */
i=0;
//#pragma omp parallel for private(k,j)
for ( j=1; j<=n - 2; j++ ) {
for ( k=1; k<=n - 2; k++ ) {
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
}
}
/* i == n-2 plane, split over all j,k boundary/interior sub-cases */
i=n - 2;
j=0;
//#pragma omp parallel for private(k)
for ( k=1; k<=n - 2; k++ ) {
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
}
i=n - 2;
j=n - 2;
k=0;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
i=n - 2;
j=n - 2;
k=n - 2;
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
i=n - 2;
j=n - 2;
k=1;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
i=n - 2;
j=n - 2;
k=n - 1;
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
i=n - 2;
j=n - 2;
//#pragma omp parallel for private(k)
for ( k=2; k<=n - 3; k++ ) {
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
i=n - 2;
j=1;
k=0;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
i=n - 2;
j=1;
k=n - 2;
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
i=n - 2;
j=1;
k=1;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
i=n - 2;
j=1;
k=n - 1;
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
i=n - 2;
j=1;
//#pragma omp parallel for private(k)
for ( k=2; k<=n - 3; k++ ) {
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
i=n - 2;
j=n - 1;
//#pragma omp parallel for private(k)
for ( k=1; k<=n - 2; k++ ) {
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
}
i=n - 2;
//#pragma omp parallel for private(k,j)
for ( j=2; j<=n - 3; j++ ) {
k=0;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
}
i=n - 2;
//#pragma omp parallel for private(k,j)
for ( j=2; j<=n - 3; j++ ) {
k=n - 2;
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
i=n - 2;
//#pragma omp parallel for private(k,j)
for ( j=2; j<=n - 3; j++ ) {
k=1;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
}
i=n - 2;
//#pragma omp parallel for private(k,j)
for ( j=2; j<=n - 3; j++ ) {
k=n - 1;
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
i=n - 2;
//#pragma omp parallel for private(k,j)
for ( j=2; j<=n - 3; j++ ) {
for ( k=2; k<=n - 3; k++ ) {
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
}
/* i == 1 plane, split over all j,k boundary/interior sub-cases */
i=1;
j=0;
//#pragma omp parallel for private(k)
for ( k=1; k<=n - 2; k++ ) {
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
}
i=1;
j=n - 2;
k=0;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
i=1;
j=n - 2;
k=n - 2;
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
i=1;
j=n - 2;
k=1;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
i=1;
j=n - 2;
k=n - 1;
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
i=1;
j=n - 2;
//#pragma omp parallel for private(k)
for ( k=2; k<=n - 3; k++ ) {
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
i=1;
j=1;
k=0;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
i=1;
j=1;
k=n - 2;
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
i=1;
j=1;
k=1;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
i=1;
j=1;
k=n - 1;
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
i=1;
j=1;
//#pragma omp parallel for private(k)
for ( k=2; k<=n - 3; k++ ) {
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
i=1;
j=n - 1;
//#pragma omp parallel for private(k)
for ( k=1; k<=n - 2; k++ ) {
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
}
i=1;
//#pragma omp parallel for private(k,j)
for ( j=2; j<=n - 3; j++ ) {
k=0;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
}
i=1;
//#pragma omp parallel for private(k,j)
for ( j=2; j<=n - 3; j++ ) {
k=n - 2;
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
i=1;
//#pragma omp parallel for private(k,j)
for ( j=2; j<=n - 3; j++ ) {
k=1;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
}
i=1;
//#pragma omp parallel for private(k,j)
for ( j=2; j<=n - 3; j++ ) {
k=n - 1;
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
i=1;
//#pragma omp parallel for private(k,j)
for ( j=2; j<=n - 3; j++ ) {
for ( k=2; k<=n - 3; k++ ) {
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
}
/* i == n-1 plane: u_1 here only feeds the forward update of u(n-2,j,k) */
i=n - 1;
//#pragma omp parallel for private(k,j)
for ( j=1; j<=n - 2; j++ ) {
for ( k=1; k<=n - 2; k++ ) {
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
}
}
/* interior planes i = 2 .. n-3, again split over the j,k sub-cases */
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=0;
for ( k=1; k<=n - 2; k++ ) {
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
}
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=n - 2;
k=0;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=n - 2;
k=n - 2;
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=n - 2;
k=1;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=n - 2;
k=n - 1;
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=n - 2;
for ( k=2; k<=n - 3; k++ ) {
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=1;
k=0;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=1;
k=n - 2;
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=1;
k=1;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=1;
k=n - 1;
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=1;
for ( k=2; k<=n - 3; k++ ) {
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
j=n - 1;
for ( k=1; k<=n - 2; k++ ) {
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
}
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
for ( j=2; j<=n - 3; j++ ) {
k=0;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
}
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
for ( j=2; j<=n - 3; j++ ) {
k=n - 2;
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
for ( j=2; j<=n - 3; j++ ) {
k=1;
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
}
}
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
for ( j=2; j<=n - 3; j++ ) {
k=n - 1;
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
}
/* fully interior cells: all seven adjoint contributions apply */
//#pragma omp parallel for private(k,j,i)
for ( i=2; i<=n - 3; i++ ) {
for ( j=2; j<=n - 3; j++ ) {
for ( k=2; k<=n - 3; k++ ) {
u_1_b(i,j,k) += D*c(i, j, k + 1)*u_b(i, j, k + 1);
u_1_b(i,j,k) += D*c(i, j + 1, k)*u_b(i, j + 1, k);
u_1_b(i,j,k) += D*c(i + 1, j, k)*u_b(i + 1, j, k);
u_1_b(i,j,k) += (-6*D*c(i, j, k) + 2.0)*u_b(i, j, k);
u_2_b(i,j,k) += -u_b(i, j, k);
u_1_b(i,j,k) += D*c(i - 1, j, k)*u_b(i - 1, j, k);
u_1_b(i,j,k) += D*c(i, j - 1, k)*u_b(i, j - 1, k);
u_1_b(i,j,k) += D*c(i, j, k - 1)*u_b(i, j, k - 1);
}
}
}
}
|
GB_binop__ge_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int32)
// A*D function (colscale): GB (_AxD__ge_int32)
// D*A function (rowscale): GB (_DxB__ge_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int32)
// C=scalar+B GB (_bind1st__ge_int32)
// C=scalar+B' GB (_bind1st_tran__ge_int32)
// C=A+scalar GB (_bind2nd__ge_int32)
// C=A'+scalar GB (_bind2nd_tran__ge_int32)
// C type: bool
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
// Operand/result types for z = (aij >= bij), the GE_INT32 operator.
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fix: no trailing backslash after the 0 -- a dangling line continuation
// silently spliced the next source line into this macro definition)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// Cx [p] accessor for the result array
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_INT32 || GxB_NO_GE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no mask, no accumulator.
// The loop body comes from the shared template, specialized by the GB_*
// macros above (GB_BINOP is z = (x >= y) for this file).
void GB (_Cdense_ewise3_noaccum__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The "#if 0" is intentional (generated code): GE cannot act as a dense
// accumulator, so the template body is compiled out and this is a no-op
// that reports success, or GrB_NO_VALUE when the operator is disabled.
GrB_Info GB (_Cdense_accumB__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// Compiled out via "#if 0" for the same reason as _Cdense_accumB above:
// a comparison operator is not a valid dense accumulator.
GrB_Info GB (_Cdense_accumb__ge_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of sparse A by the diagonal matrix D.
// Cx is the bool result array; the loop body comes from the colscale
// template, using GB_BINOP (aij >= djj) element-wise.
GrB_Info GB (_AxD__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of sparse B by the diagonal matrix D.
// Mirror image of _AxD above, via the rowscale template.
GrB_Info GB (_DxB__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, over the union of the
// patterns of A and B.  When is_eWiseUnion is true, the alpha/beta scalars
// substitute for entries present in only one of A or B.  The workspace
// declared here is freed by GB_FREE_WORKSPACE inside/after the template.
GrB_Info GB (_AaddB__ge_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
// alpha/beta are only read in the eWiseUnion case
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B, over the
// intersection of the patterns of A and B, where C is sparse/hypersparse.
GrB_Info GB (_AemultB_08__ge_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for GE (z=GE(y,x) is handled elsewhere
// by swapping to LE), so only the unflipped template branch is compiled.
GrB_Info GB (_AemultB_02__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; C has the pattern of (entries of M that hit A.*B).
GrB_Info GB (_AemultB_04__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__ge_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the operator with the scalar bound as the first
// argument: Cx [p] = (x >= Bx [p]) for every entry present in B.
// GBB tests the bitmap (always true for full matrices); GBX reads the
// value, handling the iso case.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__ge_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the operator with the scalar bound as the second
// argument: Cx [p] = (Ax [p] >= y) for every entry present in A.
GrB_Info GB (_bind2nd__ge_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first,
// using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__ge_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows; the preprocessor runs
// regardless of the return statements above
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second,
// using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__ge_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Parallel_update_ldl_02_2.h | //
// Created by Kazem on 1/21/19.
//
#ifndef PROJECT_PARALLEL_UPDATE_LDL_02_2_H
#define PROJECT_PARALLEL_UPDATE_LDL_02_2_H
namespace nasoq {
//
// Created by kazem on 12/12/18.
//
#include <stdlib.h>
#include <cmath>
#include <cassert>
#include "mkl.h"
#include "Reach.h"
#include "Sym_BLAS.h"
#undef TIMING
#undef TLAST
#undef TIMING1
#undef BLASTIMING
/*
 * Left-looking supernodal LDL' factorization update with 2x2 (Bunch-Kaufman
 * style) pivoting, parallelized with OpenMP over a level-set schedule:
 * levels 0..nLevels-2 run one task per partition (levelPtr/parPtr/partition),
 * and the last level runs serially with multi-threaded MKL BLAS instead.
 * Only supernodes whose entry in `marked` is true are refactored.
 *
 * L is stored supernodally: lC gives column pointers into lValues, Li_ptr/lR
 * give the row index structure, blockSet gives supernode column boundaries.
 * D (size 2*n) receives the block-diagonal factor; perm_piv accumulates the
 * per-column pivot permutation and nbpivot reports how many supernodes
 * required a column permutation.
 *
 * NOTE(review): `timing`, `threshold`, `chunk`, `levelSet` and `incx` are
 * accepted but never read here -- presumably kept for interface parity with
 * the other update_ldl variants; confirm before relying on them.
 * Returns true unconditionally.
 */
 bool update_ldl_left_sn_parallel_02(int n, const int *c, const int *r, const double *values,
 const size_t *lC, int *lR, const size_t *Li_ptr, double *lValues,
 double *D,
 const int *blockSet, const int supNo, double *timing,
#ifndef PRUNE
 int *aTree, int *cT, int *rT, int *col2Sup,
#else
 int *prunePtr, int *pruneSet,
#endif
 const int nLevels, const int *levelPtr, const int *levelSet,
 const int nPar, const int *parPtr, const int *partition,
 const int chunk, const int threads,
 const int super_max, const int col_max, int &nbpivot,
 int *perm_piv,
 bool *marked, double threshold = 1e-13) {
 /*
 * For timing using BLAS
 */
 const int incx = 1;
 int top = 0;
 int *xi; //= new int[2*supNo]();
 // per-column row-swap record, consumed by row_reordering at the end
 int *swap_full = new int[n]();
 // supernode ids whose off-diagonal rows must be permuted afterwards
 std::vector<int> perm_req;
 //int super_max = 64; //tunig parameter for the max size of supernodes TODO: find it in analysis
 //int col_max = n;
 int *map; //= new int[n]();
 double *contribs; //= new double[super_max*col_max]();
 double *trn_diag; //= new double[super_max*col_max]();
 int info;
 double one[2], zero[2];
 one[0] = 1.0; /* ALPHA for *syrk, *herk, *gemm, and *trsm */
 one[1] = 0.;
 zero[0] = 0.; /* BETA for *syrk, *herk, and *gemm */
 zero[1] = 0.;
 int *ipiv;
 int *ws;
 // start from the identity permutation
 for (int i = 0; i < n; ++i) {
 perm_piv[i] = i;
 }
 std::chrono::time_point<std::chrono::system_clock> start, end, startin, endin;
 std::chrono::duration<double> elapsed_seconds;
 double duration4 = 0, duration3 = 0, duration2 = 0, duration1 = 0;
#ifdef TIMING
 start = std::chrono::system_clock::now();
#endif
 // Levels 0 .. nLevels-2: partitions within one level are independent, so
 // each j1 becomes an OpenMP task that factors its supernodes serially.
 for (int i1 = 0; i1 < nLevels - 1; ++i1) {
#pragma omp parallel //shared(lValues)//private(map, contribs)
 {
#pragma omp for schedule(dynamic) private(map, trn_diag, contribs, xi, ipiv, ws, startin, endin, duration2)
 for (int j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) {
#ifdef BLASTIMING
 int threadID = omp_get_thread_num();
 std::chrono::time_point<std::chrono::system_clock> startBlas, endBlas;
#endif
 // per-task scratch buffers, released at the end of the task
 map = new int[n]();
 contribs = new double[super_max * col_max]();
 xi = new int[3 * supNo]();
 trn_diag = new double[super_max * col_max]();
 ws = new int[3 * super_max]();
 ipiv = new int[super_max]();
 //int pls = levelSet[j1];
#ifdef TIMING1
 startin = std::chrono::system_clock::now();
#endif
 //#pragma omp parallel for schedule(static,chunk)private(thth)
 for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) {
 int s = partition[k1] + 1;
 // only refactor supernodes flagged for update
 if (!marked[s - 1])
 continue;
 int curCol = s != 0 ? blockSet[s - 1] : 0;
 int nxtCol = blockSet[s];
 int supWdt = nxtCol - curCol;
 int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode
 for (int i = Li_ptr[curCol], cnt = 0; i < Li_ptr[nxtCol]; ++i) {
 map[lR[i]] = cnt++;//mapping L rows position to actual row idx
 }
 double *src, *cur = &lValues[lC[curCol]], *cur_d = &D[curCol];//pointing to first element of the current supernode
 //Reseting the current supernode.
 for (int i = 0; i < supWdt; ++i) {
 cur_d[i] = 0;
 cur_d[i + n] = 0;
 for (int j = 0; j < nSupR; ++j) {
 cur[i * nSupR + j] = 0.0;
 }
 }
 //copy the columns from A to L
 for (int i = curCol; i < nxtCol; ++i) {//Copy A to L
 int pad = i - curCol;
 for (int j = c[i]; j < c[i + 1]; ++j) {
 lValues[lC[i] + map[r[j]]] = values[j];
 }
 }
#if 0
 for (int i = curCol; i < nxtCol; ++i) {//Copy A to L
 std::cout<<"\n";
 for (int j = lC[i]; j < lC[i+1] ; ++j) {
 std::cout<<lValues[j]<<";";
 }
 }
 std::cout<<"\n\n";
#endif
 //double *src, *cur=&lValues[lC[curCol]];//pointing to first element of the current supernode
 // reach set: supernodes that contribute updates to this one
 top = ereach_sn(supNo, cT, rT, curCol, nxtCol, col2Sup, aTree, xi, xi + supNo);
 assert(top >= 0);
 //int *lbs = xi+supNo, *ubs = xi + 2*supNo;//To use for row permutation
 //if(s==2){top =2; xi[top] = 0;}
 // accumulate contributions of each source supernode in the reach set
 for (int i = top; i < supNo; ++i) {
 int lSN = xi[i];
 int nSupRs = 0;
 int cSN = blockSet[lSN];//first col of current SN
 int cNSN = blockSet[lSN + 1];//first col of Next SN
 int Li_ptr_cNSN = Li_ptr[cNSN];
 int Li_ptr_cSN = Li_ptr[cSN];
 int nSNRCur = Li_ptr_cNSN - Li_ptr_cSN;
 int supWdts = cNSN - cSN;//The width of current src SN
 int lb = 0, ub = 0;
 bool sw = true;
 int beg_col = cSN, end_col = 0;
 for (int j = Li_ptr_cSN; j < Li_ptr_cNSN; ++j) {
 //finding the overlap between curCol and curCol+supWdt in the src col
 if (lR[j] >= curCol && sw) {
 //src*transpose(row lR[j])
 lb = j - Li_ptr_cSN;
 //lbs[i] = lb;
 sw = false;
 }
 if (lR[j] < curCol + supWdt && !sw) {
 ub = j - Li_ptr_cSN;
 //ubs[i] = ub;
 }
 }
 nSupRs = Li_ptr_cNSN - Li_ptr_cSN - lb;
 int ndrow1 = ub - lb + 1;
 int ndrow3 = nSupRs - ndrow1;
 src = &lValues[lC[cSN] + lb];//first element of src supernode starting from row lb
 double *srcL = &lValues[lC[cSN] + ub + 1];
 // trn_diag = D(cSN) * src', respecting 2x2 diagonal blocks
 blocked_2by2_mult(supWdts, nSupRs, &D[cSN], src, trn_diag, nSNRCur, n);
 dgemm("N", "C", &nSupRs, &ndrow1, &supWdts, one, trn_diag, &nSupRs,
 src, &nSNRCur, zero, contribs, &nSupRs);
 // }
 //copying contrib to L
 for (int i = 0; i < ndrow1; ++i) {//Copy contribs to L
 int col = map[lR[Li_ptr_cSN + i + lb]];//col in the SN
 //double ddiag = 1.0 ;/// D[col];
 for (int j = i; j < nSupRs; ++j) {
 int cRow = lR[Li_ptr_cSN + j + lb];//corresponding row in SN
 //lValues[lC[curCol+col]+ map[cRow]] -= contribs[i*nSupRs+j];
 cur[col * nSupR + map[cRow]] -= contribs[i * nSupRs + j];
 /* if ( cRow == 78){
 std::cout<<"\n====="<<cSN<<"|| "<< cRow<<";;"<<contribs[i*nSupRs+j]<<";;"
 <<cur[col*nSupR+map[cRow]]<<";;"<<"\n";
 }*/
 }
 }
 }
 // LDL' of the diagonal block with pivoting, then normalize the pivot
 // ordering via reorder_after_sytrf
 LAPACKE_dsytrf(LAPACK_COL_MAJOR, 'L', supWdt, cur, nSupR, ipiv);
 int is_perm = reorder_after_sytrf(supWdt, cur, nSupR, ipiv,
 &perm_piv[curCol], &D[curCol], n, &swap_full[curCol], ws + supWdt);
 // re-order the columns of the super-node
 int rowNo = nSupR - supWdt;
 // convert to 1-based indices for LAPACKE_dlapmt
 for (int m = 0; m < supWdt; ++m) {
 perm_piv[curCol + m]++;
 }
 if (is_perm) {
 LAPACKE_dlapmt(LAPACK_COL_MAJOR, 1, rowNo, supWdt, &cur[supWdt], nSupR, &perm_piv[curCol]);
 // perm_req is shared across tasks; guard the push_back
#pragma omp critical
 perm_req.push_back(s);
 }
 //reordering row
 // shift local pivot indices to global column numbers
 // NOTE(review): relies on the +1 applied in the loop above
 for (int k1 = 0; k1 < supWdt; ++k1) {
 perm_piv[curCol + k1] += (curCol - 1);
 // perm_piv++;
 }
 // extract D and put unit diagonal in L
 for (int l = 0; l < supWdt; ++l) {
 D[curCol + l] = cur[l + l * nSupR];
 cur[l + l * nSupR] = 1.0;
 }
 // off-diagonal solve: rows below the diagonal block
 dtrsm("R", "L", "C", "U", &rowNo, &supWdt, one,
 cur, &nSupR, &cur[supWdt], &nSupR);
 blocked_2by2_solver(supWdt, &D[curCol], &cur[supWdt], rowNo, nSupR, n);
 }
 delete[]contribs;
 delete[]trn_diag;
 delete[]xi;
 delete[]map;
 delete[]ws;
 delete[]ipiv;
 }
#ifdef TIMING1
 endin = std::chrono::system_clock::now();
 elapsed_seconds = endin-startin;
 duration1=elapsed_seconds.count();
 int thth2=omp_get_thread_num();
 std::cout<<"**"<<thth2<<" : "<<j1<<" "<<duration1<<"\n";
#endif
 }
 }
#if 1
 //LAst iteration
 // Last level: run serially here, but let MKL BLAS use all threads.
 // The factorization body below is identical to the task body above.
 MKL_Domain_Set_Num_Threads(threads, MKL_DOMAIN_BLAS);
 map = new int[n]();
 contribs = new double[super_max * col_max]();
 xi = new int[3 * supNo]();
 trn_diag = new double[super_max * col_max]();
 ws = new int[3 * super_max]();
 ipiv = new int[super_max]();
 for (int j1 = levelPtr[nLevels - 1]; j1 < levelPtr[nLevels]; ++j1) {
#ifdef TLAST
 start = std::chrono::system_clock::now();
#endif
 for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) {
 int s = partition[k1] + 1;
 if (!marked[s - 1])
 continue;
 int curCol = s != 0 ? blockSet[s - 1] : 0;
 int nxtCol = blockSet[s];
 int supWdt = nxtCol - curCol;
 int nSupR = Li_ptr[nxtCol] - Li_ptr[curCol];//row size of supernode
 for (int i = Li_ptr[curCol], cnt = 0; i < Li_ptr[nxtCol]; ++i) {
 map[lR[i]] = cnt++;//mapping L rows position to actual row idx
 }
 double *src, *cur = &lValues[lC[curCol]], *cur_d = &D[curCol];//pointing to first element of the current supernode
 //Reseting the current supernode.
 for (int i = 0; i < supWdt; ++i) {
 cur_d[i] = 0;
 cur_d[i + n] = 0;
 for (int j = 0; j < nSupR; ++j) {
 cur[i * nSupR + j] = 0.0;
 }
 }
 //copy the columns from A to L
 for (int i = curCol; i < nxtCol; ++i) {//Copy A to L
 int pad = i - curCol;
 for (int j = c[i]; j < c[i + 1]; ++j) {
 lValues[lC[i] + map[r[j]]] = values[j];
 }
 }
 //double *src, *cur=&lValues[lC[curCol]];//pointing to first element of the current supernode
 top = ereach_sn(supNo, cT, rT, curCol, nxtCol, col2Sup, aTree, xi, xi + supNo);
 assert(top >= 0);
 //int *lbs = xi+supNo, *ubs = xi + 2*supNo;//To use for row permutation
 //if(s==2){top =2; xi[top] = 0;}
 for (int i = top; i < supNo; ++i) {
 int lSN = xi[i];
 int nSupRs = 0;
 int cSN = blockSet[lSN];//first col of current SN
 int cNSN = blockSet[lSN + 1];//first col of Next SN
 int Li_ptr_cNSN = Li_ptr[cNSN];
 int Li_ptr_cSN = Li_ptr[cSN];
 int nSNRCur = Li_ptr_cNSN - Li_ptr_cSN;
 int supWdts = cNSN - cSN;//The width of current src SN
 int lb = 0, ub = 0;
 bool sw = true;
 int beg_col = cSN, end_col = 0;
 for (int j = Li_ptr_cSN; j < Li_ptr_cNSN; ++j) {
 //finding the overlap between curCol and curCol+supWdt in the src col
 if (lR[j] >= curCol && sw) {
 //src*transpose(row lR[j])
 lb = j - Li_ptr_cSN;
 //lbs[i] = lb;
 sw = false;
 }
 if (lR[j] < curCol + supWdt && !sw) {
 ub = j - Li_ptr_cSN;
 //ubs[i] = ub;
 }
 }
 nSupRs = Li_ptr_cNSN - Li_ptr_cSN - lb;
 int ndrow1 = ub - lb + 1;
 int ndrow3 = nSupRs - ndrow1;
 src = &lValues[lC[cSN] + lb];//first element of src supernode starting from row lb
 double *srcL = &lValues[lC[cSN] + ub + 1];
 blocked_2by2_mult(supWdts, nSupRs, &D[cSN], src, trn_diag, nSNRCur, n);
 dgemm("N", "C", &nSupRs, &ndrow1, &supWdts, one, trn_diag, &nSupRs,
 src, &nSNRCur, zero, contribs, &nSupRs);
 // }
 //copying contrib to L
 for (int i = 0; i < ndrow1; ++i) {//Copy contribs to L
 int col = map[lR[Li_ptr_cSN + i + lb]];//col in the SN
 //double ddiag = 1.0 ;/// D[col];
 for (int j = i; j < nSupRs; ++j) {
 int cRow = lR[Li_ptr_cSN + j + lb];//corresponding row in SN
 //lValues[lC[curCol+col]+ map[cRow]] -= contribs[i*nSupRs+j];
 cur[col * nSupR + map[cRow]] -= contribs[i * nSupRs + j];
 /* if ( cRow == 78){
 std::cout<<"\n====="<<cSN<<"|| "<< cRow<<";;"<<contribs[i*nSupRs+j]<<";;"
 <<cur[col*nSupR+map[cRow]]<<";;"<<"\n";
 }*/
 }
 }
 }
 LAPACKE_dsytrf(LAPACK_COL_MAJOR, 'L', supWdt, cur, nSupR, ipiv);
 int is_perm = reorder_after_sytrf(supWdt, cur, nSupR, ipiv,
 &perm_piv[curCol], &D[curCol], n, &swap_full[curCol], ws + supWdt);
 // re-order the columns of the super-node
 int rowNo = nSupR - supWdt;
 for (int m = 0; m < supWdt; ++m) {
 perm_piv[curCol + m]++;
 }
 if (is_perm) {
 LAPACKE_dlapmt(LAPACK_COL_MAJOR, 1, rowNo, supWdt, &cur[supWdt], nSupR, &perm_piv[curCol]);
 // serial here, so no critical section needed
 perm_req.push_back(s);
 }
 //reordering row
 for (int k1 = 0; k1 < supWdt; ++k1) {
 perm_piv[curCol + k1] += (curCol - 1);
 // perm_piv++;
 }
 for (int l = 0; l < supWdt; ++l) {
 D[curCol + l] = cur[l + l * nSupR];
 cur[l + l * nSupR] = 1.0;
 }
 dtrsm("R", "L", "C", "U", &rowNo, &supWdt, one,
 cur, &nSupR, &cur[supWdt], &nSupR);
 blocked_2by2_solver(supWdt, &D[curCol], &cur[supWdt], rowNo, nSupR, n);
 }
#ifdef TLAST
 end = std::chrono::system_clock::now();
 elapsed_seconds = end-start;
 duration1=elapsed_seconds.count();
 std::cout<<"++ " <<duration1<<"\n";
#endif
 }
 // reuse ws as scratch for row_reordering
 // NOTE(review): only the first super_max entries are cleared although ws
 // has 3*super_max -- confirm row_reordering's workspace requirement
 for (int k = 0; k < super_max; ++k) {
 ws[k] = 0;
 }
 nbpivot = perm_req.size();
 // apply the accumulated row permutations to the affected supernodes
 row_reordering(supNo, lC, Li_ptr, lR, blockSet, aTree, cT, rT, col2Sup,
 lValues, perm_req, swap_full, xi, map, ws, contribs);
 perm_req.clear();
 delete[]contribs;
 delete[]trn_diag;
 delete[]xi;
 delete[]map;
 delete[]ws;
 delete[]ipiv;
 delete[]swap_full;
#endif
 return true;
 }
}
#endif //PROJECT_PARALLEL_UPDATE_LDL_02_2_H
|
blockchain_fmt_plug.c | /* blockchain "My Wallet" cracker patch for JtR. Hacked together during June of
* 2013 by Dhiru Kholia <dhiru at openwall.com>.
*
* See https://blockchain.info/wallet/wallet-format
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Improved detection, added iteration count and handle v2 hashes, Feb, 2015, JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_blockchain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_blockchain);
#else
#include <string.h>
#include <errno.h>
#include "arch.h"
#include "jumbo.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha1.h"
#include "blockchain_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4 // this is a slow format
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Blockchain"
#define FORMAT_NAME "My Wallet"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 AES " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (x10)"
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static struct custom_salt *cur_salt;
/* Format initialization: scale key-batch sizes for OpenMP and allocate the
 * per-candidate key and result buffers.  The minimum batch is scaled by the
 * thread count alone; the maximum batch is additionally scaled by OMP_SCALE
 * to amortize threading overhead in this slow format. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int omp_t = 1;
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	cracked = mem_calloc_align(sizeof(*cracked),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Release the buffers allocated in init(). MEM_FREE also NULLs the pointer. */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}
/* Select the salt that subsequent crypt_all() calls will use. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Derive a PBKDF2-SHA1 key from each candidate password and try to decrypt
 * the wallet; cracked[i] records per-candidate success.
 *
 * Note the JtR idiom below: the `for` statement itself sits inside the
 * #ifdef _OPENMP block.  Without OpenMP, max_keys_per_crypt is not scaled in
 * init(), so count <= MAX_KEYS_PER_CRYPT and the single execution of the
 * braced block (with index == 0) covers the whole batch. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		/* SIMD path: run SSE_GROUP_SZ_SHA1 PBKDF2 instances in parallel. */
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			pout[i] = master[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens,
				cur_salt->data, 16, cur_salt->iter, pout, 32, 0);
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			if (blockchain_decrypt(master[i], cur_salt->data) == 0)
				cracked[i+index] = 1;
			else
				cracked[i+index] = 0;
		}
#else
		/* Scalar path: one candidate at a time. */
		unsigned char master[32];
		pbkdf2_sha1((unsigned char *)saved_key[index],
				strlen(saved_key[index]),
				cur_salt->data, 16,
				cur_salt->iter, master, 32, 0);
		if (blockchain_decrypt(master, cur_salt->data) == 0)
			cracked[index] = 1;
		else
			cracked[index] = 0;
#endif
	}
	return count;
}
/* Return nonzero if any candidate in the batch decrypted the wallet. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = count - 1; i >= 0; i--) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Per-candidate check: crypt_all() already recorded the result. */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* No further verification possible beyond the decrypt check in crypt_all(). */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate password; strnzcpy bounds and NUL-terminates the copy. */
static void blockchain_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate password for reporting. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor registered with JtR; the two inner braces are the
 * fmt_params and fmt_methods sub-structures (positional initializers). */
struct fmt_main fmt_blockchain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,                       /* min plaintext length */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },                /* no tunable cost names */
		{ FORMAT_TAG },
		blockchain_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		blockchain_common_valid,
		fmt_default_split,
		fmt_default_binary,
		blockchain_common_get_salt,
		{ NULL },                /* no tunable cost functions */
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,                    /* no salt_compare */
		set_salt,
		blockchain_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
ccsd_pack.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
/*
* a * v1 + b * v2.transpose(0,2,1,3)
*/
void CCmake_0213(double *out, double *v1, double *v2, int count, int m,
double a, double b)
{
#pragma omp parallel default(none) \
shared(count, m, out, v1, v2, a, b)
{
int i, j, k, l, n;
size_t d2 = m * m;
size_t d1 = m * m * m;
double *pv1, *pv2, *pout;
#pragma omp for schedule (static)
for (i = 0; i < count; i++) {
for (n = 0, j = 0; j < m; j++) {
for (k = 0; k < m; k++) {
pout = out + d1*i + d2*j + m*k;
pv1 = v1 + d1*i + d2*j + m*k;
pv2 = v2 + d1*i + d2*k + m*j;
for (l = 0; l < m; l++, n++) {
pout[l] = pv1[l] * a + pv2[l] * b;
}
} } }
}
}
/*
* out = v1 + v2.transpose(0,2,1)
*/
void CCsum021(double *out, double *v1, double *v2, int count, int m)
{
#pragma omp parallel default(none) \
shared(count, m, out, v1, v2)
{
int i, j, k, n;
size_t mm = m * m;
double *pout, *pv1, *pv2;
#pragma omp for schedule (static)
for (i = 0; i < count; i++) {
pout = out + mm * i;
pv1 = v1 + mm * i;
pv2 = v2 + mm * i;
for (n = 0, j = 0; j < m; j++) {
for (k = 0; k < m; k++, n++) {
pout[n] = pv1[n] + pv2[k*m+j];
} }
}
}
}
/*
* g2 = a * v1 + b * v2.transpose(0,2,1)
*/
/*
 * out = a * v1 + b * v2.transpose(0,2,1), applied to `count` m-by-m
 * matrices: out[i,j,k] = a*v1[i,j,k] + b*v2[i,k,j].
 * When a == b == 1 this delegates to the specialized CCsum021 kernel.
 */
void CCmake_021(double *out, double *v1, double *v2, int count, int m,
                double a, double b)
{
        if (a == 1 && b == 1) {
                /* Fix: `return CCsum021(...)` returned a void expression
                 * from a void function, which is a constraint violation in
                 * ISO C (C11 6.8.6.4); call and return separately. */
                CCsum021(out, v1, v2, count, m);
                return;
        }
#pragma omp parallel default(none) \
        shared(count, m, out, v1, v2, a, b)
{
        int i, j, k, n;
        size_t mm = m * m;
        double *pout, *pv1, *pv2;
#pragma omp for schedule (static)
        for (i = 0; i < count; i++) {
                pout = out + mm * i;
                pv1 = v1 + mm * i;
                pv2 = v2 + mm * i;
                for (n = 0, j = 0; j < m; j++) {
                for (k = 0; k < m; k++, n++) {
                        pout[n] = pv1[n] * a + pv2[k*m+j] * b;
                } }
        }
}
}
/*
 * For the contraction Tr(A B) with symmetric B:
 *   Tr(AB) ~ sum_i A_ii B_ii + sum_{i>j} (A_ij + A_ji) B_ij
 * For each of `count` m x m matrices, pack the symmetrized strict lower
 * triangle (A_ij + A_ji, i > j) row by row, placing the diagonal element
 * (scaled by diagfac) at the end of each row's run.
 */
void CCprecontract(double *out, double *in, int count, int m, double diagfac)
{
#pragma omp parallel default(none) \
        shared(count, m, in, out, diagfac)
{
        int ib, row, col, n;
        size_t mm = m * m;
        size_t ntri = m * (m+1) / 2;   /* packed size per matrix */
#pragma omp for schedule (static)
        for (ib = 0; ib < count; ib++) {
                double *pout = out + ntri * ib;
                double *pin = in + mm * ib;
                n = 0;
                for (row = 0; row < m; row++) {
                        for (col = 0; col < row; col++) {
                                pout[n++] = pin[row*m+col] + pin[col*m+row];
                        }
                        pout[n++] = pin[row*m+row] * diagfac;
                }
        }
}
}
/*
 * if i1 == j1:
 * eri = unpack_tril(eri, axis=0)
 * unpack_tril(eri).reshape(i1-i0,j1-j0,nao,nao).transpose(0,2,1,3)
 *
 * Unpack each triangle-packed nao x nao ERI block and scatter it into
 * `out`, which is laid out as (ni, nao, nj, nao): the j axis and the first
 * nao axis are transposed relative to the packed input.
 */
void CCload_eri(double *out, double *eri, int *orbs_slice, int nao)
{
int i0 = orbs_slice[0];
int i1 = orbs_slice[1];
int j0 = orbs_slice[2];
int j1 = orbs_slice[3];
size_t ni = i1 - i0;                    /* rows in the i slice */
size_t nj = j1 - j0;                    /* rows in the j slice */
size_t nn = nj * nao;                   /* stride between i-planes of out */
size_t nao_pair = nao * (nao + 1) / 2;  /* size of one packed triangle */
#pragma omp parallel default(none) \
shared(out, eri, i1, j1, ni, nj, nn, nao, nao_pair)
{
int i, j, k, l, ij;
double *pout;
/* per-thread scratch for one unpacked square matrix.
 * NOTE(review): malloc result is not checked before use. */
double *buf = malloc(sizeof(double) * nao*nao);
#pragma omp for schedule (static)
for (ij = 0; ij < ni*nj; ij++) {
i = ij / nj;
j = ij % nj;
/* expand packed triangle #ij into the full nao x nao square in buf */
NPdunpack_tril(nao, eri+ij*nao_pair, buf, 1);
pout = out + (i*nn+j)*nao;
for (k = 0; k < nao; k++) {
for (l = 0; l < nao; l++) {
pout[k*nn+l] = buf[k*nao+l];
} }
}
free(buf);
}
}
/*
 * eri put virtual orbital first
 * [ v ]
 * [ v . ]
 * [ v . . ]
 * [ o . . . ]
 * [ o . . . . ]
 *
 * In-place reorder of `count` packed-triangular (nmo x nmo, nmo = nocc+nvir)
 * ERI matrices so that, after the virtual-virtual triangle, the occ-vir
 * rectangle and the occ-occ triangle swap places: layout becomes
 * [vir_pair | occ_pair | nocc*nvir].
 */
void CCsd_sort_inplace(double *eri, int nocc, int nvir, int count)
{
#pragma omp parallel default(none) \
shared(eri, nocc, nvir, count)
{
int ic, i, j, ij;
size_t nmo = nocc + nvir;
size_t nmo_pair = nmo * (nmo+1) / 2;
size_t nocc_pair = nocc * (nocc+1) /2;
size_t nvir_pair = nvir * (nvir+1) /2;
double *peri, *pout;
/* scratch holding the nocc x nvir rectangle while the triangles move */
double *buf = malloc(sizeof(double) * nocc*nvir);
#pragma omp for schedule (static)
for (ic = 0; ic < count; ic++) {
/* 1) copy the occ-vir rectangle out of the packed rows into buf;
 *    each occupied row i starts nvir+i elements after the previous */
peri = eri + ic*nmo_pair + nvir_pair;
for (i = 0; i < nocc; i++, peri+=nvir+i) {
for (j = 0; j < nvir; j++) {
buf[i*nvir+j] = peri[j];
}
}
/* 2) compact the occ-occ triangular entries forward so they follow
 *    the vir-vir triangle immediately */
pout = eri + ic*nmo_pair + nvir_pair;
peri = eri + ic*nmo_pair + nvir_pair + nvir;
for (ij = 0, i = 0; i < nocc; i++, peri+=nvir+i) {
for (j = 0; j <= i; j++, ij++) {
pout[ij] = peri[j];
}
}
/* 3) append the saved occ-vir rectangle after the occ-occ triangle */
pout = eri + ic*nmo_pair + nvir_pair + nocc_pair;
NPdcopy(pout, buf, nocc*nvir);
}
free(buf);
}
}
|
quicksort.c | /* C implementation QuickSort from http://w...content-available-to-author-only...s.org/quick-sort/ */
#include<stdio.h>
#include<stdlib.h>
#include<omp.h>
/* Exchange the int values stored at the two given addresses. */
void swap(int* a, int* b)
{
    int tmp = *b;
    *b = *a;
    *a = tmp;
}
/* Lomuto partition: take arr[high] as the pivot, move all elements
   <= pivot in front of it, place the pivot at its final sorted
   position, and return that position. */
int partition (int arr[], int low, int high)
{
    int pivot = arr[high];
    int boundary = low - 1;   /* last index of the "<= pivot" region */
    for (int j = low; j < high; j++)
    {
        if (arr[j] <= pivot)
        {
            boundary++;
            int t = arr[boundary];
            arr[boundary] = arr[j];
            arr[j] = t;
        }
    }
    /* move the pivot just past the "<= pivot" region */
    int t = arr[boundary + 1];
    arr[boundary + 1] = arr[high];
    arr[high] = t;
    return boundary + 1;
}
/* The main function that implements QuickSort
arr[] --> Array to be sorted,
low --> Starting index,
high --> Ending index */
void quickSort(int arr[], int low, int high)
{
if (low < high)
{
/* pi is partitioning index, arr[p] is now
at right place */
int pi = partition(arr, low, high);
// Separately sort elements before
// partition and after partition
/* NOTE(review): every recursion level opens a fresh `parallel sections`
   region.  With nested parallelism disabled (the default; the
   omp_set_nested(1) call in main is commented out) only the first level
   actually runs in parallel, and enabling nesting would oversubscribe
   threads exponentially with recursion depth.  OpenMP tasks are the
   usual construct for parallel recursive sorting. */
#pragma omp parallel sections
{
#pragma omp section
quickSort(arr, low, pi - 1);
#pragma omp section
quickSort(arr, pi + 1, high);
}
}
}
/* Print `size` ints separated by spaces, followed by a newline. */
void printArray(int arr[], int size)
{
    for (int idx = 0; idx < size; idx++)
        printf("%d ", arr[idx]);
    printf("\n");
}
// Driver program: sort 10 million random ints with the parallel quicksort.
int main()
{
    int i, n = 10000000;
    int *arr = (int*) malloc(n*sizeof(int));
    /* BUG FIX: malloc was unchecked — a failed ~40 MB allocation would
       crash in the fill loop below. */
    if (arr == NULL)
    {
        fprintf(stderr, "allocation of %d ints failed\n", n);
        return 1;
    }
    for(i=0; i < n; i++)
        arr[i] = rand()%n;
    // omp_set_nested(1);
    omp_set_num_threads(2);
    quickSort(arr, 0, n-1);
    //printf("Sorted array: \n");
    //printArray(arr, n);
    free(arr);   /* BUG FIX: arr was leaked */
    return 0;
}
|
8246.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization.
 * Fills data with the deterministic pattern i*j/M and sets the
 * normalization factor float_n. */
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
/* BUG FIX: loop bounds used the compile-time macros M and N instead of
 * the runtime sizes m and n passed in (the polybench convention used by
 * every other routine in this benchmark).  Identical behavior when
 * m == M and n == N, correct when they differ. */
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.
Dumps the m x m matrix to stderr, inserting a newline every 20 values. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int r, c;
for (r = 0; r < m; r++)
for (c = 0; c < m; c++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[r][c]);
if ((r * m + c) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return.
Computes symmat = cov(data): column means, mean-centering, then the
symmetric m x m covariance matrix (only the upper triangle is computed;
the lower is mirrored). */
static
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
int i, j, j1, j2;
#pragma scop
/* Determine mean of column vectors of input data matrix */
{
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Center the column vectors. */
/* NOTE(review): the target constructs below carry no map() clauses, so
   they rely on implicit/default data mapping — confirm the intended
   device data environment. */
for (i = 0; i < _PB_N; i++)
{
#pragma omp target teams distribute thread_limit(256)
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
}
}
/* Calculate the m * m covariance matrix. */
for (j1 = 0; j1 < _PB_M; j1++)
{
#pragma omp target teams distribute thread_limit(256)
for (j2 = j1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += data[i][j1] * data[i][j2];
/* mirror into the lower triangle; each j2 writes a distinct element */
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
}
/* Program entry: allocate, initialize, time the covariance kernel, and
   print the result to stderr to defeat dead-code elimination. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
pooling_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// Per-channel 3x3 max pooling with stride 2.
// Each output pixel is the max of a 3x3 input window; four outputs at a
// time are computed with NEON (vld2 de-interleaves even/odd columns) and
// the remainder with scalar code.
static void pooling3x3s2_max_neon(const Mat& bottom_blob, Mat& top_blob)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After one output row, r0/r1/r2 have advanced 2*outw columns; this step
// skips the rest of the row plus one full row (stride 2 vertically).
const int tailstep = w - 2*outw + w;
#pragma omp parallel for
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
// three consecutive input rows feeding one output row
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;              // groups of 4 outputs done with NEON
int remain = outw - (nn << 2);   // leftover outputs done scalar
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// vld2q de-interleaves: val[0] = even columns, val[1] = odd columns.
float32x4x2_t _r0 = vld2q_f32(r0);
float32x4x2_t _r1 = vld2q_f32(r1);
float32x4x2_t _r2 = vld2q_f32(r2);
for (; nn>0; nn--)
{
// preload the next 8 columns so vext can form the third window column
float32x4x2_t _r0n = vld2q_f32(r0+8);
float32x4x2_t _r1n = vld2q_f32(r1+8);
float32x4x2_t _r2n = vld2q_f32(r2+8);
// max of window columns 0 and 1 per row
float32x4_t _max0 = vmaxq_f32(_r0.val[0], _r0.val[1]);
float32x4_t _max1 = vmaxq_f32(_r1.val[0], _r1.val[1]);
float32x4_t _max2 = vmaxq_f32(_r2.val[0], _r2.val[1]);
// window column 2 = even columns shifted left by one element
float32x4_t _r02 = vextq_f32(_r0.val[0], _r0n.val[0], 1);
float32x4_t _r12 = vextq_f32(_r1.val[0], _r1n.val[0], 1);
float32x4_t _r22 = vextq_f32(_r2.val[0], _r2n.val[0], 1);
_max0 = vmaxq_f32(_max0, _r02);
_max1 = vmaxq_f32(_max1, _r12);
_max2 = vmaxq_f32(_max2, _r22);
// reduce the three row maxima to the final 3x3 max
float32x4_t _max = vmaxq_f32(vmaxq_f32(_max0, _max1), _max2);
vst1q_f32(outptr, _max);
_r0 = _r0n;
_r1 = _r1n;
_r2 = _r2n;
r0 += 8;
r1 += 8;
r2 += 8;
outptr += 4;
}
#else
// ARMv7 inline-asm version of the same 4-outputs-per-iteration kernel:
// vld2 de-interleaves, vmax folds columns 0/1, vext forms column 2.
if (nn > 0)
{
asm volatile(
"pld [%1, #256] \n"
"vld2.f32 {d0-d3}, [%1]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"pld [%3, #256] \n"
"vld2.f32 {d8-d11}, [%3]! \n"
"0: \n"
"pld [%1, #256] \n"
"vld2.f32 {d12-d15}, [%1]! \n"// q6 = 8 10 12 14 q7 = 9 11 13 15
"vmax.f32 q12, q0, q1 \n"
"vmax.f32 q13, q2, q3 \n"
"pld [%2, #256] \n"
"vld2.f32 {d16-d19}, [%2]! \n"
"vmax.f32 q14, q4, q5 \n"
"vext.32 q0, q0, q6, #1 \n"
"pld [%3, #256] \n"
"vld2.f32 {d20-d23}, [%3]! \n"
"vext.32 q2, q2, q8, #1 \n"
"vmax.f32 q12, q12, q0 \n"
"vext.32 q4, q4, q10, #1 \n"
"vmax.f32 q13, q13, q2 \n"
"vmax.f32 q14, q14, q4 \n"
"vmax.f32 q12, q12, q13 \n"
"vorr q0, q6, q6 \n"
"vorr q1, q7, q7 \n"
"vmax.f32 q12, q12, q14 \n"
"vorr q2, q8, q8 \n"
"vorr q3, q9, q9 \n"
"vorr q4, q10, q10 \n"
"vorr q5, q11, q11 \n"
"subs %0, #1 \n"
"vst1.f32 {d24-d25}, [%4]! \n"
"bne 0b \n"
"sub %1, #32 \n"
"sub %2, #32 \n"
"sub %3, #32 \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(outptr) // %4
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(outptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// scalar tail: one 3x3 window per output, stride 2 horizontally
for (; remain>0; remain--)
{
float max0 = std::max(std::max(r0[0], r0[1]), r0[2]);
float max1 = std::max(std::max(r1[0], r1[1]), r1[2]);
float max2 = std::max(std::max(r2[0], r2[1]), r2[2]);
*outptr = std::max(std::max(max0, max1), max2);
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;//1 + w;
r1 += tailstep;//1 + w;
r2 += tailstep;//1 + w;
}
}
}
|
rose_accumulateForce.c | #include <omp.h>
/* For each of the `len` outputs, sum the tmp[] values selected by the
 * CSR-style index structure and accumulate the sum into force[].
 *
 * idxBound: len+1 offsets into idxList; entry ii owns
 *           idxList[idxBound[ii] .. idxBound[ii+1]-1].
 * tmp:      values gathered through idxList.
 * force:    accumulated in place (force[ii] += sum).
 */
void AccumulateForce(int *idxBound,int *idxList,int len,double *tmp,double *force)
{
/* BUG FIX: the original outer pragma had `private(jj)` referencing a
 * variable that is never declared at that scope (rejected by strict
 * compilers), and the inner loop opened a second nested `parallel for`,
 * oversubscribing threads.  Only the independent outer loop needs to be
 * parallel; the inner reduction runs serially per iteration. */
#pragma omp parallel for firstprivate (len)
  for (int ii = 0; ii <= len - 1; ii += 1) {
    int count = idxBound[ii + 1] - idxBound[ii];
    int *list = &idxList[idxBound[ii]];
    double sum = 0.0;
    for (int jj = 0; jj <= count - 1; jj += 1) {
      sum += tmp[list[jj]];
    }
    force[ii] += sum;
  }
  return ;
}
|
Scene.h | #pragma once
#include <vector>
#include <random>
#include <cmath>
#include "Vec3.h"
#include "Vertex.h"
#include "Camera.h"
#include "Ray.h"
#include "Triangle.h"
#include "Mesh.h"
#include "LightSource.h"
using namespace std;
void printProgressBar(float prop) {
int progress = round(50.0f * prop);
string progressBar = "";
for (int i=0; i<progress; i++) {
progressBar += "\u2588";
}
std::cout << "Raytracing... [" << progressBar << string(50 - progress, ' ') << "] " << progress * 2 << "%\r" << flush;
}
// Draw a random unit direction in the hemisphere around v.
// Builds an orthonormal frame (v, v2, v3), picks phi uniform in [0, 2pi)
// and theta = asin(u) with u uniform in [0, 1) (weights samples toward v).
// NOTE(review): a fresh random_device/mt19937 is constructed per call,
// which is expensive and weakens sample quality — consider a persistent,
// per-thread generator.
Vec3f sample_along(Vec3f v) {
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<> dis(0.0, 1.0);
v.normalize();
Vec3f v2, v3;
v.getTwoOrthogonals(v2, v3);
v2.normalize();
v3.normalize();
float theta = asin(dis(gen));
float phi = 2 * M_PI * dis(gen);
// dir is a unit vector in the tangent plane at angle phi
Vec3f dir = v2 * cos(phi) + v3 * sin(phi);
dir.normalize();
return normalize(v * cos(theta) + dir * sin(theta));
}
// Jittered (stratified) sample: interpret sample_idx as a cell of a
// d x d grid (d = floor(sqrt(n_samples))) and draw a uniform point inside
// that cell. The third component is a constant tag (42), unused by callers.
Vec3f jit_sample(int sample_idx, int n_samples) {
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<> dis(0.0, 1.0);
    const int d = int(sqrt(float(n_samples)));
    const int row = sample_idx / d;
    const int col = sample_idx % d;
    const float sx = (float(col) + dis(gen)) / float(d);
    const float sy = (float(row) + dis(gen)) / float(d);
    return {sx, sy, 42};
}
// A renderable scene: meshes, a camera, and light sources, plus the
// path-tracing entry points. Pixel colors are written into an Image.
class Scene {
public:
vector<Mesh> m_meshes;
Camera m_cam;
vector<LightSource> m_lights;
int m_n_samples; // Should be a square of an integer, so that jittered sampling works properly
Scene() {
m_meshes = vector<Mesh>();
m_cam = Camera();
m_lights = vector<LightSource>();
m_n_samples = 4;
}
// Render the whole image: for each pixel, average m_n_samples rays
// (currently a fixed center sample, so all samples are identical).
// NOTE(review): the innermost `omp parallel for` over sample_idx makes
// every sample iteration do `+=` on the SAME pixel — a data race; the
// sample loop should be serial (or use a reduction). The middle
// parallel-for over j is also nested inside no outer region, so only
// one level actually parallelizes.
void rayTrace(Image &im) {
// #pragma omp parallel for
for (int i = 0; i < im.m_width; i++) {
printProgressBar((float)(i + 1) / (float)im.m_width);
#pragma omp parallel for
for (int j = 0; j < im.m_height; j++) {
im.m_data[j * im.m_width + i] = {0, 0, 0};
#pragma omp parallel for
for (int sample_idx = 0; sample_idx < m_n_samples; sample_idx++) {
// Vec3f noise = jit_sample(sample_idx, m_n_samples);
Vec3f noise = {0.5, 0.5, 0}; // Remove the noise since we are taking only one sample
float x = (float(i) + noise[0]) / float(im.m_width);
float y = (float(j) + noise[1]) / float(im.m_height);
Ray rij = m_cam.launch_ray(x, y);
// im.m_data[j * im.m_width + i] += recurse_ray(rij, 0);
im.m_data[j * im.m_width + i] += ray_normal_only(rij);
}
im.m_data[j * im.m_width + i] *= 1 / float(m_n_samples);
}
}
}
/*
* Checks if ls is visible from v
*
* Shoots a shadow ray from v toward ls and reports false if any mesh
* intersection lies closer than ls.
*/
bool is_visible(Vec3f v, Vec3f ls) {
Ray rvl = Ray(v, normalize(ls - v));
for (Mesh const &m : this->m_meshes) {
Vec3i t;
vector<float> intersection = m.m_bvh.intersection(rvl, t, m.m_vertices, m.m_triangles);
if (intersection.size() > 0) {
// NOTE(review): this float t shadows the Vec3i t above — works, but
// worth renaming; intersection[3] is the hit distance.
float t = intersection[3];
if (t <= (ls - v).length())
return false;
}
}
return true;
}
// Direct lighting at a hit point: interpolate the normal and position
// from barycentric coords (intersection[0..2]), then accumulate each
// light's contribution (ambient / point / area with one random sample),
// gated by a shadow test offset slightly along the normal.
Vec3f colorize(const vector<float> &intersection, const Vec3i &t, const Mesh &m, Vec3f rayDir) {
// rng from https://en.cppreference.com/w/cpp/numeric/random/uniform_real_distribution
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<> dis(0.0, 1.0);
Vec3f normal_at_point = intersection[0] * m.m_vertices[t[0]].m_normal
+ intersection[1] * m.m_vertices[t[1]].m_normal
+ intersection[2] * m.m_vertices[t[2]].m_normal;
normal_at_point.normalize();
Vec3f intersection_position = intersection[0] * m.m_vertices[t[0]].m_point
+ intersection[1] * m.m_vertices[t[1]].m_point
+ intersection[2] * m.m_vertices[t[2]].m_point;
Vec3f overall_color;
Vec3f color;
for (LightSource light : this->m_lights) {
Vec3f random_source;
switch (light.m_type)
{
case L_AMBIENT:
color = light.m_intensity * light.m_color * max(0.0f, dot(normal_at_point, -rayDir));
color *= m.m_material.m_diffuse_coef * m.m_material.diffuse_response(intersection_position);
break;
case L_POINT:
// offset start point by a small epsilon to avoid self-shadowing
if (is_visible(intersection_position + 2 * __FLT_EPSILON__ * normal_at_point, light.m_position)) {
color = light.m_intensity * light.m_color * max(0.0f, dot(normal_at_point, -rayDir));
color *= m.m_material.evaluateColorResponse(normal_at_point,
light.m_position - intersection_position,
-rayDir,
intersection_position);
}
else {
color = {0, 0, 0};
}
break;
case L_RECTANGLE:
// single stochastic sample on the rectangle (soft shadows)
random_source = light.m_position + float(dis(gen)) * light.m_vec1 + float(dis(gen)) * light.m_vec2;
if (is_visible(intersection_position + 2 * __FLT_EPSILON__ * normal_at_point, random_source)) {
color = light.m_intensity * light.m_color * max(0.0f, dot(normal_at_point, -rayDir));
color *= m.m_material.evaluateColorResponse(normal_at_point,
random_source - intersection_position,
-rayDir,
intersection_position);
}
else {
color = {0, 0, 0};
}
break;
default:
color = {0, 0, 0};
break;
}
overall_color += color;
}
return overall_color;
}
// Path tracing with a hard recursion cap of 3 bounces: find the nearest
// hit across all meshes, add direct lighting (colorize), then recurse
// along a direction sampled about the normal.
Vec3f recurse_ray(Ray r, int depth) {
if (depth > 3)
return {0, 0, 0};
Vec3f this_ray_color = {0, 0, 0};
vector<float> nearest_intersection = {};
Vec3i nearest_t;
const Mesh *nearest_m;
for (Mesh const &m : m_meshes) {
Vec3i t;
vector<float> intersection = m.m_bvh.intersection(r, t, m.m_vertices, m.m_triangles);
if (intersection.size() > 0) {
// intersection[3] holds the hit distance; keep the closest
if (nearest_intersection.size() == 0 || nearest_intersection[3] > intersection[3]) {
nearest_intersection = intersection;
nearest_t = t;
nearest_m = &m;
}
}
}
if (nearest_intersection.size() > 0) {
// nearest_m is guaranteed set here (nearest_intersection non-empty)
this_ray_color = this->colorize(nearest_intersection, nearest_t, *nearest_m, r.m_direction);
Vec3f normal_at_point = nearest_intersection[0] * nearest_m->m_vertices[nearest_t[0]].m_normal
+ nearest_intersection[1] * nearest_m->m_vertices[nearest_t[1]].m_normal
+ nearest_intersection[2] * nearest_m->m_vertices[nearest_t[2]].m_normal;
normal_at_point.normalize();
Vec3f intersection_position = nearest_intersection[0] * nearest_m->m_vertices[nearest_t[0]].m_point
+ nearest_intersection[1] * nearest_m->m_vertices[nearest_t[1]].m_point
+ nearest_intersection[2] * nearest_m->m_vertices[nearest_t[2]].m_point;
// Sample along normal
Vec3f random_vector = sample_along(normal_at_point);
// random_vector = -r.m_direction + 2.0f * (r.m_direction - dot(r.m_direction, normal_at_point) * normal_at_point); // Perfect reflection
Vec3f recursed_color = recurse_ray(Ray(intersection_position, random_vector), depth + 1)
* nearest_m->m_material.evaluateColorResponse(normal_at_point,
random_vector,
-r.m_direction,
intersection_position);
return this_ray_color + recursed_color;
}
else {
return {0, 0, 0};
}
}
// Debug shading: encode the hit normal (projected on two camera axes) in
// the first two channels and the index of the hit mesh in the third.
Vec3f ray_normal_only(Ray r) {
Vec3f this_ray_color = {0, 0, 0};
vector<float> nearest_intersection = {};
Vec3i nearest_t;
const Mesh *nearest_m;
int mesh_id = 0;
int nearest_id = -1;
for (Mesh const &m : m_meshes) {
Vec3i t;
vector<float> intersection = m.m_bvh.intersection(r, t, m.m_vertices, m.m_triangles);
if (intersection.size() > 0) {
if (nearest_intersection.size() == 0 || nearest_intersection[3] > intersection[3]) {
nearest_intersection = intersection;
nearest_t = t;
nearest_id = mesh_id;
nearest_m = &m;
}
}
mesh_id++;
}
if (nearest_intersection.size() > 0) {
Vec3f normal_at_point = nearest_intersection[0] * nearest_m->m_vertices[nearest_t[0]].m_normal
+ nearest_intersection[1] * nearest_m->m_vertices[nearest_t[1]].m_normal
+ nearest_intersection[2] * nearest_m->m_vertices[nearest_t[2]].m_normal;
normal_at_point.normalize();
return {dot(-m_cam.reference_frame()[1], normal_at_point),
dot(-m_cam.reference_frame()[2], normal_at_point),
float(nearest_id)}; // We do not need the third channel to know where we are on the picture, so we use it to know which mesh we are on
}
else {
return {0, 0, 0};
}
}
};
|
parallel_reduction_messages.c | // RUN: %clang_cc1 -verify -fopenmp -ferror-limit 150 -o - %s
int incomplete[];
void test() {
#pragma omp parallel reduction(+ : incomplete) // expected-error {{a reduction list item with incomplete type 'int []'}}
;
}
// complete to suppress an additional warning, but it's too late for pragmas
int incomplete[3];
|
parallel_for.h | /*
Copyright (c) 2013, Taiga Nomi and the respective contributors
All rights reserved.
Use of this source code is governed by a BSD-style license that can be found
in the LICENSE file.
*/
#pragma once
#include <cassert>
#include <cstdio>
#include <limits>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "tiny_dnn/config.h"
#include "tiny_dnn/util/aligned_allocator.h"
#include "tiny_dnn/util/nn_error.h"
#ifdef CNN_USE_TBB
#ifndef NOMINMAX
#define NOMINMAX // tbb includes windows.h in tbb/machine/windows_api.h
#endif
#include <tbb/task_group.h>
#include <tbb/tbb.h>
#endif
#if !defined(CNN_USE_OMP) && !defined(CNN_SINGLE_THREAD)
#include <future> // NOLINT
#include <thread> // NOLINT
#endif
#if defined(CNN_USE_GCD) && !defined(CNN_SINGLE_THREAD)
#include <dispatch/dispatch.h>
#endif
namespace tiny_dnn {
#ifdef CNN_USE_TBB
static tbb::task_scheduler_init tbbScheduler(
tbb::task_scheduler_init::automatic); // tbb::task_scheduler_init::deferred);
typedef tbb::blocked_range<size_t> blocked_range;
// TBB backend: parallel map of f over [begin, end).
// Grain size is `grainsize` when the range exceeds it, otherwise 1.
template <typename Func>
void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) {
assert(end >= begin);
tbb::parallel_for(
blocked_range(begin, end, end - begin > grainsize ? grainsize : 1), f);
}
// Serial debug variant (TBB build): invoke f once over the whole range.
template <typename Func>
void xparallel_for(size_t begin, size_t end, const Func &f) {
f(blocked_range(begin, end, 100));
}
#else
// Minimal stand-in for tbb::blocked_range: a half-open index
// interval [begin_, end_) handed to parallel loop bodies.
struct blocked_range {
typedef size_t const_iterator;
blocked_range(size_t begin, size_t end) : begin_(begin), end_(end) {}
// int overload so signed loop indices (e.g. OpenMP loops) convert cleanly
blocked_range(int begin, int end) : begin_(begin), end_(end) {}
const_iterator begin() const { return begin_; }
const_iterator end() const { return end_; }
private:
size_t begin_;
size_t end_;
};
// Serial fallback: invoke f exactly once over the whole [begin, end) range.
template <typename Func>
void xparallel_for(size_t begin, size_t end, const Func &f) {
  f(blocked_range(begin, end));
}
#if defined(CNN_USE_OMP)
// OpenMP backend: one f(blocked_range) call per index; grainsize ignored.
template <typename Func>
void parallel_for(size_t begin,
size_t end,
const Func &f,
size_t /*grainsize*/) {
assert(end >= begin);
// unsigned index isn't allowed in OpenMP 2.0
#pragma omp parallel for
for (int i = static_cast<int>(begin); i < static_cast<int>(end); ++i)
f(blocked_range(i, i + 1));
}
#elif defined(CNN_USE_GCD)
// Grand Central Dispatch backend: split [begin, end) into blocks of
// `grainsize` (minimum 1) and dispatch_apply them on the default queue.
// NOTE(review): block boundaries are computed from index 0, so a non-zero
// `begin` is effectively ignored — confirm callers always pass begin == 0.
template <typename Func>
void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) {
assert(end >= begin);
size_t count = end - begin;
size_t blockSize = grainsize;
if (count < blockSize || blockSize == 0) {
blockSize = 1;
}
size_t blockCount = (count + blockSize - 1) / blockSize;
assert(blockCount > 0);
dispatch_apply(blockCount, dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0),
^(size_t block) {
size_t blockStart = block * blockSize;
size_t blockEnd = blockStart + blockSize;
if (blockEnd > end) {
blockEnd = end;
}
assert(blockStart < blockEnd);
f(blocked_range(blockStart, blockEnd));
});
}
#elif defined(CNN_SINGLE_THREAD)
// Single-thread build: delegate to the serial fallback.
template <typename Func>
void parallel_for(size_t begin,
size_t end,
const Func &f,
size_t /*grainsize*/) {
xparallel_for(begin, end, f);
}
#else
// std::async backend: split [begin, end) into one contiguous block per
// hardware thread and run each block as an async task; waits for all
// tasks before returning.
template <typename Func>
void parallel_for(size_t begin,
                  size_t end,
                  const Func &f,
                  size_t /*grainsize*/) {
  assert(end >= begin);
  size_t nthreads = std::thread::hardware_concurrency();
  // BUG FIX: hardware_concurrency() may return 0 ("not computable"),
  // which made the division below divide by zero.
  if (nthreads == 0) nthreads = 1;
  size_t blockSize = (end - begin) / nthreads;
  if (blockSize * nthreads < end - begin) blockSize++;  // round up
  std::vector<std::future<void> > futures;

  size_t blockBegin = begin;
  size_t blockEnd = blockBegin + blockSize;
  if (blockEnd > end) blockEnd = end;

  for (size_t i = 0; i < nthreads; i++) {
    futures.push_back(
      std::move(std::async(std::launch::async, [blockBegin, blockEnd, &f] {
        f(blocked_range(blockBegin, blockEnd));
      })));

    blockBegin += blockSize;
    blockEnd = blockBegin + blockSize;
    if (blockBegin >= end) break;
    if (blockEnd > end) blockEnd = end;
  }

  for (auto &future : futures) future.wait();
}
#endif
#endif // CNN_USE_TBB
// True when `value` survives a round-trip conversion through T unchanged,
// i.e. T can exactly represent it.
template <typename T, typename U>
bool value_representation(U const &value) {
  const T narrowed = static_cast<T>(value);
  return static_cast<U>(narrowed) == value;
}
// Dispatch helper: run f over [begin, end) in parallel when requested and
// when `end` converts losslessly to size_t; otherwise run serially.
template <typename T, typename Func>
inline void for_(
bool parallelize, size_t begin, T end, Func f, size_t grainsize = 100) {
static_assert(std::is_integral<T>::value, "end must be integral type");
parallelize = parallelize && value_representation<size_t>(end);
parallelize ? parallel_for(begin, end, f, grainsize)
: xparallel_for(begin, end, f);
}
// Apply f(i) for every i in [0, size), optionally in parallel.
// CNN_SINGLE_THREAD builds use a plain serial loop; otherwise the range is
// chunked via for_ and, under OpenMP, each chunk is iterated with an
// `omp parallel for` (int index: unsigned isn't allowed in OpenMP 2.0).
template <typename T, typename Func>
inline void for_i(bool parallelize, T size, Func f, size_t grainsize = 100u) {
#ifdef CNN_SINGLE_THREAD
for (size_t i = 0; i < size; ++i) {
f(i);
}
#else // #ifdef CNN_SINGLE_THREAD
for_(parallelize, 0u, size,
[&](const blocked_range &r) {
#ifdef CNN_USE_OMP
#pragma omp parallel for
for (int i = static_cast<int>(r.begin());
i < static_cast<int>(r.end()); i++) {
f(i);
}
#else
for (size_t i = r.begin(); i < r.end(); i++) {
f(i);
}
#endif
},
grainsize);
#endif // #ifdef CNN_SINGLE_THREAD
}
// Convenience overload: parallel by default.
template <typename T, typename Func>
inline void for_i(T size, Func f, size_t grainsize = 100) {
for_i(true, size, f, grainsize);
}
} // namespace tiny_dnn
|
3781.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization.
Fills the four input matrices with deterministic index-based patterns
(different divisor per matrix) so results are reproducible. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = ((DATA_TYPE) i*j) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nm; j++)
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
for (i = 0; i < nm; i++)
for (j = 0; j < nl; j++)
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.
Dumps the ni x nl result matrix G to stderr, one newline per 20 values. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return.
Computes G = (A*B) * (C*D) via the temporaries E and F. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
#pragma scop
/* BUG FIX: the parallel directive carried `num_threads(#P11)`, an
unexpanded autotuning placeholder that is not valid C.  The clause is
dropped so the runtime picks the team size. */
#pragma omp parallel private (i, j, k)
{
/* E := A*B */
for (i = 0; i < _PB_NI; i++)
{
#pragma omp target teams distribute thread_limit(256)
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
}
/* F := C*D */
for (i = 0; i < _PB_NJ; i++)
{
#pragma omp target teams distribute thread_limit(256)
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
}
/* G := E*F */
for (i = 0; i < _PB_NI; i++)
{
#pragma omp target teams distribute thread_limit(256)
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
}
#pragma endscop
}
/* Program entry: allocate, initialize, time the 3mm kernel, and print the
   result to stderr to defeat dead-code elimination. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
Stencil_par2.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "malloc2D.h"
#include "timer.h"
/* 5-point stencil smoothing on a jmax x imax grid, offloaded with OpenMP
 * target directives; accumulates and reports total kernel time. */
int main(int argc, char *argv[])
{
   struct timespec tstart_cpu;      /* removed unused tstop_cpu */
   double cpu_time = 0.0;           /* BUG FIX: was read with += while uninitialized (UB) */
   int imax=2002, jmax = 2002;
   int niter=1000, nburst=100;
   double** restrict x = malloc2D(jmax, imax);
   double** restrict xnew = malloc2D(jmax, imax);
#pragma omp target enter data map(to:x[0:jmax][0:imax], xnew[0:jmax][0:imax])
#pragma omp target teams
{
   /* initialize the field to 5.0 and clear the scratch array */
   #pragma omp distribute parallel for simd
   for (int j = 0; j < jmax; j++){
      for (int i = 0; i < imax; i++){
         xnew[j][i] = 0.0;
         x[j][i] = 5.0;
      }
   }
   /* hot patch near the center of the grid */
   #pragma omp distribute parallel for simd
   for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){
      for (int i = imax/2 - 5; i < imax/2 -1; i++){
         x[j][i] = 400.0;
      }
   }
} // omp target teams
   for (int iter = 0; iter < niter; iter+=nburst){
      for (int ib = 0; ib < nburst; ib++){
         cpu_timer_start(&tstart_cpu);
         /* xnew = 5-point (center + 4 neighbors) average of x */
         #pragma omp target teams distribute parallel for simd
         for (int j = 1; j < jmax-1; j++){
            for (int i = 1; i < imax-1; i++){
               xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0;
            }
         }
         /* copy back for the next sweep */
         #pragma omp target teams distribute parallel for simd
         for (int j = 0; j < jmax; j++){
            for (int i = 0; i < imax; i++){
               x[j][i] = xnew[j][i];
            }
         }
         cpu_time += cpu_timer_stop(tstart_cpu);
      }
      printf("Iter %d\n",iter+nburst);
   }
#pragma omp target exit data map(from:x[0:jmax][0:imax], xnew[0:jmax][0:imax])
   free(x);
   free(xnew);
   printf("Timing is %lf\n",cpu_time);
}
|
mixed_tentusscher_myo_epi_2004_S2_6.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_6.h"
/* Report this model's metadata to the framework: the resting potential
 * and/or the number of ODE state variables, as requested by the flags. */
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
/* Load the initial state vector for one cell.  The extra_data mask selects
 * the cell type: mapping[sv_id] == 0 -> myocardium, otherwise epicardium.
 * Both branches use precomputed steady-state values rather than the
 * textbook defaults (kept in the commented blocks for reference). */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382}; for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
// Integrates num_steps time steps of size dt for every cell listed in
// cells_to_solve (or cells 0..num_cells_to_solve-1 when that list is NULL),
// dispatching each cell to the myocardium or epicardium kernel according to
// the extra_data type mask.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array (0 = myocardium, non-zero = epicardium)
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
// BUG FIX: index the mask by the global cell id (sv_id), not by the
// local loop index i. The mask is built per global cell — see
// set_model_initial_conditions_cpu, which reads mapping[sv_id] — so
// mapping[i] is wrong whenever cells_to_solve is a subset of the mesh.
// The cell type is also loop-invariant, so hoist it out of the
// num_steps loop.
const int is_myocardium = (mapping[sv_id] == 0);
for (int j = 0; j < num_steps; ++j)
{
if (is_myocardium)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// Advances a single myocardium cell by one time step of size dt.
// RHS_cpu_myo returns the *updated* state (not derivatives), which is
// copied back into sv in place.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real state_in[NEQ];
    real state_out[NEQ];

    for (int k = 0; k < NEQ; k++) {
        state_in[k] = sv[k];
    }

    RHS_cpu_myo(state_in, state_out, stim_current, dt);

    for (int k = 0; k < NEQ; k++) {
        sv[k] = state_out[k];
    }
}
// Right-hand side / update kernel of the TenTusscher-Noble-Noble-Panfilov
// 2004 *myocardium* cell model for one time step dt.
//
// NOTE(review): despite the rDY_ name, this function writes UPDATED STATE
// values into rDY_, not time derivatives: the gating variables (indices
// 1..10) are advanced with Rush-Larsen exponential integration
// (x_inf - (x_inf - x)*exp(-dt/TAU_x)), the concentrations (13..16) are
// advanced in place and copied out, and rDY_[0] = svolt + dt*(-sItot) is
// an explicit Euler step for the membrane voltage.
//
// sv           : input state vector (17 entries, layout below)
// rDY_         : output (updated) state vector
// stim_current : external stimulus current
// dt           : time step
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Pre-computed Rush-Larsen decay factors for the FCa and G gates
// (both have constant time constants, so exp(-dt/tau) is hoisted here).
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
// Nernst / reversal potentials for K+, Na+, Ks (with Na permeability), Ca2+
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// Buffered SR calcium is solved analytically: the rapid-buffering
// approximation yields a quadratic whose positive root is the new CaSR.
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
// Same rapid-buffering quadratic for cytosolic calcium.
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Rush-Larsen step for each voltage-dependent gate.
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G gates may not increase while the cell is depolarized
// (svolt > -37 mV): revert to the previous value in that case.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
// Explicit Euler step; remaining slots carry the updated gates/concentrations.
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
// Advances a single epicardium cell by one time step of size dt.
// RHS_cpu_epi returns the *updated* state (not derivatives), which is
// copied back into sv in place.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real state_in[NEQ];
    real state_out[NEQ];

    for (int k = 0; k < NEQ; k++) {
        state_in[k] = sv[k];
    }

    RHS_cpu_epi(state_in, state_out, stim_current, dt);

    for (int k = 0; k < NEQ; k++) {
        sv[k] = state_out[k];
    }
}
// Right-hand side / update kernel of the TenTusscher-Noble-Noble-Panfilov
// 2004 *epicardium* cell model for one time step dt.
//
// Unlike RHS_cpu_myo, this variant overwrites several conductances and SR
// calcium-handling constants with a fitted parameter set (the parameters[]
// array below), and parameterizes Irel (arel, crel) and Ileak (Vleak).
//
// NOTE(review): as in the myocardium kernel, rDY_ receives UPDATED STATE
// values: gates use Rush-Larsen exponential integration, concentrations are
// advanced in place, and rDY_[0] is an explicit Euler voltage step.
//
// sv           : input state vector (17 entries, layout below)
// rDY_         : output (updated) state vector
// stim_current : external stimulus current
// dt           : time step
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Fitted parameter set; overrides the default conductances declared above.
// (presumably obtained by a parameter-tuning/genetic-algorithm fit —
//  TODO confirm provenance of these values)
real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Pre-computed Rush-Larsen decay factors for the FCa and G gates
// (constant time constants, so exp(-dt/tau) is hoisted here).
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
// Nernst / reversal potentials for K+, Na+, Ks (with Na permeability), Ca2+
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
// Irel and Ileak use the fitted arel/crel/Vleak parameters (the myo
// kernel hard-codes these coefficients).
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// Rapid-buffering approximation: new CaSR is the positive root of a quadratic.
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
// Same rapid-buffering quadratic for cytosolic calcium.
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Rush-Larsen step for each voltage-dependent gate.
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G gates may not increase while the cell is depolarized
// (svolt > -37 mV): revert to the previous value in that case.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
// Explicit Euler step; remaining slots carry the updated gates/concentrations.
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
GB_binop__bshift_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bshift_uint16
// A.*B function (eWiseMult): GB_AemultB__bshift_uint16
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bshift_uint16
// C+=b function (dense accum): GB_Cdense_accumb__bshift_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_uint16
// C=scalar+B GB_bind1st__bshift_uint16
// C=scalar+B' GB_bind1st_tran__bshift_uint16
// C=A+scalar GB_bind2nd__bshift_uint16
// C=A'+scalar GB_bind2nd_tran__bshift_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint16 (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_bitshift_uint16 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT16 || GxB_NO_BSHIFT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no dense C += A+B kernel is generated for the BSHIFT
// operator ("(none)" is the generator's placeholder name), since per the
// note below only a fixed set of accumulable operators qualify.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no mask, no accumulator.
// The actual loop lives in the included template, specialized via the GB_*
// macros defined at the top of this file. Returns GrB_NO_VALUE when this
// operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB_Cdense_ewise3_noaccum__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the
// BSHIFT operator. The slice arrays (kfirst/klast/pstart) partition B's
// entries across ntasks parallel tasks; the loop itself is in the template.
GrB_Info GB_Cdense_accumB__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the BSHIFT
// operator. p_bwork points to the scalar, passed type-erased as GB_void.
// Returns GrB_NO_VALUE when this operator/type pair is compiled out.
GrB_Info GB_Cdense_accumb__bshift_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// FIX: single exit point. The original had a return inside the block
// above followed by an identical, unreachable return here (dead code);
// the structure now matches GB_Cdense_accumB above.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no C = A*D (colscale) kernel is generated for the BSHIFT
// operator; "(none)" is the generator's placeholder name.
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no C = D*B (rowscale) kernel is generated for the BSHIFT
// operator. FIX: the placeholder name read "(node)" — a typo; every other
// disabled stub in this file uses the generator's "(none)" placeholder.
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B using the BSHIFT operator, where "+" is
// set union on the patterns of A and B. C_to_M/C_to_A/C_to_B map C's
// vectors back to the inputs; TaskList partitions the work across ntasks.
// The numeric loop is in the included template.
GrB_Info GB_AaddB__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B using the BSHIFT operator, where ".*"
// is set intersection on the patterns of A and B. Same task/mapping
// conventions as GB_AaddB above; the numeric loop is in the template.
GrB_Info GB_AemultB__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitshift (x, Bx [p]) for p = 0..anz-1: apply the BSHIFT
// operator with the scalar bound as the first argument. Inputs arrive
// type-erased (GB_void) and are cast to their concrete types here.
GrB_Info GB_bind1st__bshift_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// shift amount comes from B; the scalar x is the value shifted
Cx [p] = GB_bitshift_uint16 (x, Bx [p]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitshift (Ax [p], y) for p = 0..anz-1: apply the BSHIFT
// operator with the scalar bound as the second argument. Inputs arrive
// type-erased (GB_void) and are cast to their concrete types here.
GrB_Info GB_bind2nd__bshift_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// the scalar y is the shift amount applied to every entry of A
Cx [p] = GB_bitshift_uint16 (Ax [p], y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint16 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the BSHIFT operator with the scalar
// bound first, via the GB_CAST_OP macro defined just above. The GB_ATYPE
// macro is temporarily redefined to int8_t (A is the operator's *second*
// argument here) and restored to uint16_t at the end of the function, so
// later code in this file sees the original definition.
GrB_Info GB_bind1st_tran__bshift_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint16 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the BSHIFT operator with the scalar
// bound second, via the GB_CAST_OP macro defined just above. No GB_ATYPE
// redefinition is needed here: A is the operator's first argument, which is
// already uint16_t.
GrB_Info GB_bind2nd_tran__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
column_matrix.h | /*!
* Copyright 2017 by Contributors
* \file column_matrix.h
* \brief Utility for fast column-wise access
* \author Philip Cho
*/
#ifndef XGBOOST_COMMON_COLUMN_MATRIX_H_
#define XGBOOST_COMMON_COLUMN_MATRIX_H_
#include <limits>
#include <vector>
#include <memory>
#include "hist_util.h"
namespace xgboost {
namespace common {
class ColumnMatrix;
/*! \brief column type */
enum ColumnType {
kDenseColumn,
kSparseColumn
};
/*! \brief a column storage, to be used with ApplySplit. Note that each
bin id is stored as index[i] + index_base.
Different types of column index for each column allow
to reduce the memory usage. */
template <typename BinIdxType>
class Column {
 public:
  Column(ColumnType type, common::Span<const BinIdxType> index, const uint32_t index_base)
      : type_(type),
        index_(index),
        index_base_(index_base) {}

  // Global bin id = this column's base offset + its per-feature bin id.
  uint32_t GetGlobalBinIdx(size_t idx) const {
    return index_base_ + static_cast<uint32_t>(index_[idx]);
  }

  // Per-feature bin id (not offset by index_base_).
  BinIdxType GetFeatureBinIdx(size_t idx) const { return index_[idx]; }

  // FIX: top-level const on a by-value return type is meaningless and
  // triggers -Wignored-qualifiers; return plain uint32_t (no caller impact).
  uint32_t GetBaseIdx() const { return index_base_; }

  common::Span<const BinIdxType> GetFeatureBinIdxPtr() const { return index_; }

  ColumnType GetType() const { return type_; }

  /* returns number of elements in column */
  size_t Size() const { return index_.size(); }

 private:
  /* type of column */
  ColumnType type_;
  /* bin indexes in range [0, max_bins - 1] */
  common::Span<const BinIdxType> index_;
  /* bin index offset for specific feature */
  const uint32_t index_base_;
};
// Sparse column: stores only the rows that have an entry, with a parallel
// row-index array. GetRowIdx uses data()[idx] rather than operator[], which
// presumably bypasses Span bounds checking on hot paths — TODO confirm
// against common::Span's operator[] semantics.
template <typename BinIdxType>
class SparseColumn: public Column<BinIdxType> {
 public:
  SparseColumn(ColumnType type, common::Span<const BinIdxType> index,
               uint32_t index_base, common::Span<const size_t> row_ind)
      : Column<BinIdxType>(type, index, index_base),
        row_ind_(row_ind) {}
  // Raw pointer to the row-index array.
  const size_t* GetRowData() const { return row_ind_.data(); }
  // Row id of the idx-th stored entry.
  size_t GetRowIdx(size_t idx) const {
    return row_ind_.data()[idx];
  }
 private:
  /* indexes of rows */
  common::Span<const size_t> row_ind_;
};
// Dense column: one slot per row; a parallel boolean range flags which rows
// are missing. The flags are held by iterator into a vector<bool> owned by
// the enclosing ColumnMatrix.
template <typename BinIdxType>
class DenseColumn: public Column<BinIdxType> {
 public:
  DenseColumn(ColumnType type, common::Span<const BinIdxType> index,
              uint32_t index_base,
              const std::vector<bool>::const_iterator missing_flags)
      : Column<BinIdxType>(type, index, index_base),
        missing_flags_(missing_flags) {}
  // True if row idx has no value for this feature.
  bool IsMissing(size_t idx) const { return missing_flags_[idx]; }
 private:
  /* flags for missing values in dense columns */
  std::vector<bool>::const_iterator missing_flags_;
};
/*! \brief a collection of columns, with support for construction from
GHistIndexMatrix. */
class ColumnMatrix {
public:
// get number of features (one column per feature; type_ has one entry each)
inline bst_uint GetNumFeature() const {
return static_cast<bst_uint>(type_.size());
}
// construct column matrix from GHistIndexMatrix
// Classifies each feature as dense or sparse (a feature is sparse when its
// nonzero count falls below sparse_threshold * nrow), lays out per-feature
// storage via a prefix sum over the chosen layout, then fills the bin-index
// storage using the smallest integer width that can hold the bin ids.
inline void Init(const GHistIndexMatrix& gmat,
double sparse_threshold) {
const int32_t nfeature = static_cast<int32_t>(gmat.cut.Ptrs().size() - 1);
const size_t nrow = gmat.row_ptr.size() - 1;
// identify type of each column
feature_counts_.resize(nfeature);
type_.resize(nfeature);
std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
// Sanity check: each feature's bin-id range must fit in uint32_t.
uint32_t max_val = std::numeric_limits<uint32_t>::max();
for (int32_t fid = 0; fid < nfeature; ++fid) {
CHECK_LE(gmat.cut.Ptrs()[fid + 1] - gmat.cut.Ptrs()[fid], max_val);
}
bool all_dense = gmat.IsDense();
gmat.GetFeatureCounts(&feature_counts_[0]);
// classify features
for (int32_t fid = 0; fid < nfeature; ++fid) {
if (static_cast<double>(feature_counts_[fid])
< sparse_threshold * nrow) {
type_[fid] = kSparseColumn;
all_dense = false;
} else {
type_[fid] = kDenseColumn;
}
}
// want to compute storage boundary for each feature
// using variants of prefix sum scan
// (dense columns reserve nrow slots; sparse ones only their nonzeros)
feature_offsets_.resize(nfeature + 1);
size_t accum_index_ = 0;
feature_offsets_[0] = accum_index_;
for (int32_t fid = 1; fid < nfeature + 1; ++fid) {
if (type_[fid - 1] == kDenseColumn) {
accum_index_ += static_cast<size_t>(nrow);
} else {
accum_index_ += feature_counts_[fid - 1];
}
feature_offsets_[fid] = accum_index_;
}
SetTypeSize(gmat.max_num_bins);
index_.resize(feature_offsets_[nfeature] * bins_type_size_, 0);
if (!all_dense) {
row_ind_.resize(feature_offsets_[nfeature]);
}
// store least bin id for each feature
// NOTE(review): caches a raw pointer into gmat.cut's storage — assumes
// gmat outlives this ColumnMatrix; verify at the call sites.
index_base_ = const_cast<uint32_t*>(gmat.cut.Ptrs().data());
const bool noMissingValues = NoMissingValues(gmat.row_ptr[nrow], nrow, nfeature);
// With no missing values, every slot will be written, so flags start
// false; otherwise start all-true and clear them as values are set.
if (noMissingValues) {
missing_flags_.resize(feature_offsets_[nfeature], false);
} else {
missing_flags_.resize(feature_offsets_[nfeature], true);
}
// pre-fill index_ for dense columns
if (all_dense) {
BinTypeSize gmat_bin_size = gmat.index.GetBinTypeSize();
if (gmat_bin_size == kUint8BinsTypeSize) {
SetIndexAllDense(gmat.index.data<uint8_t>(), gmat, nrow, nfeature, noMissingValues);
} else if (gmat_bin_size == kUint16BinsTypeSize) {
SetIndexAllDense(gmat.index.data<uint16_t>(), gmat, nrow, nfeature, noMissingValues);
} else {
CHECK_EQ(gmat_bin_size, kUint32BinsTypeSize);
SetIndexAllDense(gmat.index.data<uint32_t>(), gmat, nrow, nfeature, noMissingValues);
}
/* For sparse DMatrix gmat.index.getBinTypeSize() returns always kUint32BinsTypeSize
but for ColumnMatrix we still have a chance to reduce the memory consumption */
} else {
if (bins_type_size_ == kUint8BinsTypeSize) {
SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
} else if (bins_type_size_ == kUint16BinsTypeSize) {
SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
} else {
CHECK_EQ(bins_type_size_, kUint32BinsTypeSize);
SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
}
}
}
/* Set the number of bytes based on numeric limit of maximum number of bins provided by user */
void SetTypeSize(size_t max_num_bins) {
if ( (max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint8_t>::max()) ) {
bins_type_size_ = kUint8BinsTypeSize;
} else if ((max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint16_t>::max())) {
bins_type_size_ = kUint16BinsTypeSize;
} else {
bins_type_size_ = kUint32BinsTypeSize;
}
}
/* Fetch an individual column. This code should be used with type swith
to determine type of bin id's */
template <typename BinIdxType>
std::unique_ptr<const Column<BinIdxType> > GetColumn(unsigned fid) const {
CHECK_EQ(sizeof(BinIdxType), bins_type_size_);
const size_t feature_offset = feature_offsets_[fid]; // to get right place for certain feature
const size_t column_size = feature_offsets_[fid + 1] - feature_offset;
common::Span<const BinIdxType> bin_index = { reinterpret_cast<const BinIdxType*>(
&index_[feature_offset * bins_type_size_]),
column_size };
std::unique_ptr<const Column<BinIdxType> > res;
if (type_[fid] == ColumnType::kDenseColumn) {
std::vector<bool>::const_iterator column_iterator = missing_flags_.begin();
advance(column_iterator, feature_offset); // increment iterator to right position
res.reset(new DenseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
column_iterator));
} else {
res.reset(new SparseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
{&row_ind_[feature_offset], column_size}));
}
return res;
}
template<typename T>
inline void SetIndexAllDense(T* index, const GHistIndexMatrix& gmat, const size_t nrow,
const size_t nfeature, const bool noMissingValues) {
T* local_index = reinterpret_cast<T*>(&index_[0]);
/* missing values make sense only for column with type kDenseColumn,
and if no missing values were observed it could be handled much faster. */
if (noMissingValues) {
#pragma omp parallel for num_threads(omp_get_max_threads())
for (omp_ulong rid = 0; rid < nrow; ++rid) {
const size_t ibegin = rid*nfeature;
const size_t iend = (rid+1)*nfeature;
size_t j = 0;
for (size_t i = ibegin; i < iend; ++i, ++j) {
const size_t idx = feature_offsets_[j];
local_index[idx + rid] = index[i];
}
}
} else {
/* to handle rows in all batches, sum of all batch sizes equal to gmat.row_ptr.size() - 1 */
size_t rbegin = 0;
for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
const xgboost::Entry* data_ptr = batch.data.HostVector().data();
const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
const size_t batch_size = batch.Size();
CHECK_LT(batch_size, offset_vec.size());
for (size_t rid = 0; rid < batch_size; ++rid) {
const size_t size = offset_vec[rid + 1] - offset_vec[rid];
SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};
const size_t ibegin = gmat.row_ptr[rbegin + rid];
const size_t iend = gmat.row_ptr[rbegin + rid + 1];
CHECK_EQ(ibegin + inst.size(), iend);
size_t j = 0;
size_t fid = 0;
for (size_t i = ibegin; i < iend; ++i, ++j) {
fid = inst[j].index;
const size_t idx = feature_offsets_[fid];
/* rbegin allows to store indexes from specific SparsePage batch */
local_index[idx + rbegin + rid] = index[i];
missing_flags_[idx + rbegin + rid] = false;
}
}
rbegin += batch.Size();
}
}
}
template<typename T>
inline void SetIndex(uint32_t* index, const GHistIndexMatrix& gmat,
const size_t nrow, const size_t nfeature) {
std::vector<size_t> num_nonzeros;
num_nonzeros.resize(nfeature);
std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);
T* local_index = reinterpret_cast<T*>(&index_[0]);
size_t rbegin = 0;
for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
const xgboost::Entry* data_ptr = batch.data.HostVector().data();
const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
const size_t batch_size = batch.Size();
CHECK_LT(batch_size, offset_vec.size());
for (size_t rid = 0; rid < batch_size; ++rid) {
const size_t ibegin = gmat.row_ptr[rbegin + rid];
const size_t iend = gmat.row_ptr[rbegin + rid + 1];
size_t fid = 0;
const size_t size = offset_vec[rid + 1] - offset_vec[rid];
SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};
CHECK_EQ(ibegin + inst.size(), iend);
size_t j = 0;
for (size_t i = ibegin; i < iend; ++i, ++j) {
const uint32_t bin_id = index[i];
fid = inst[j].index;
if (type_[fid] == kDenseColumn) {
T* begin = &local_index[feature_offsets_[fid]];
begin[rid + rbegin] = bin_id - index_base_[fid];
missing_flags_[feature_offsets_[fid] + rid + rbegin] = false;
} else {
T* begin = &local_index[feature_offsets_[fid]];
begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
row_ind_[feature_offsets_[fid] + num_nonzeros[fid]] = rid + rbegin;
++num_nonzeros[fid];
}
}
}
rbegin += batch.Size();
}
}
const BinTypeSize GetTypeSize() const {
return bins_type_size_;
}
const bool NoMissingValues(const size_t n_elements,
const size_t n_row, const size_t n_features) {
return n_elements == n_features * n_row;
}
private:
std::vector<uint8_t> index_;
std::vector<size_t> feature_counts_;
std::vector<ColumnType> type_;
std::vector<size_t> row_ind_;
/* indicate where each column's index and row_ind is stored. */
std::vector<size_t> feature_offsets_;
// index_base_[fid]: least bin id for feature fid
uint32_t* index_base_;
std::vector<bool> missing_flags_;
BinTypeSize bins_type_size_;
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_COLUMN_MATRIX_H_
|
GB_binop__bor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int8)
// C=scalar+B GB (_bind1st__bor_int8)
// C=scalar+B' GB (_bind1st_tran__bor_int8)
// C=A+scalar GB (_bind2nd__bor_int8)
// C=A'+scalar GB (_bind2nd_tran__bor_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT8 || GxB_NO_BOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; cij = aij | bij, no accumulator.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), so the caller falls
// back to the generic worker. Numerical work lives in the included template,
// specialized by the GB_* macros defined at the top of this file.
GrB_Info GB (_Cdense_ewise3_noaccum__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse matrix B into dense matrix C with cij |= bij.
// B_ek_slicing / B_ntasks / B_nthreads describe the task partition of B's
// entries, consumed by the included template.
GrB_Info GB (_Cdense_accumB__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into every entry of dense matrix C (cij |= b).
GrB_Info GB (_Cdense_accumb__bor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above already returned. Harmless
// artifact of the code generator; left as-is since this file is generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with cij = aij | bij on the
// union of patterns. The mask M may be structural (Mask_struct) and/or
// complemented (Mask_comp). All numerical work is in GB_add_template.c.
GrB_Info GB (_AaddB__bor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with cij = aij | bij on the intersection of
// patterns, where C is sparse/hypersparse; optional mask M (structural and/or
// complemented). Work is done by GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full. GB_BINOP_FLIP is 0 for BOR (commutative), so the flipxy
// argument is ignored and the unflipped template is always used.
GrB_Info GB (_AemultB_02__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full; cij = aij | bij at entries selected by M.
GrB_Info GB (_AemultB_04__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap;
// cij = aij | bij. Work is done by GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = x | Bx [p] for every entry present in B.
// Bb is B's bitmap (may be NULL per the GBB macro); bnz is the number of
// slots to scan; entries absent from B are left untouched in Cx.
GrB_Info GB (_bind1st__bor_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t x = (*((int8_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only operate on entries present in B
if (GBB (Bb, p))
{
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) | (bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = Ax [p] | y for every entry present in A.
// Ab is A's bitmap (may be NULL per the GBB macro); anz is the number of
// slots to scan; entries absent from A are left untouched in Cx.
GrB_Info GB (_bind2nd__bor_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only operate on entries present in A
if (GBB (Ab, p))
{
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) | (y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x | aij, using the GB_CAST_OP
// macro defined just above. Workspaces/A_slice partition the transpose work.
GrB_Info GB (_bind1st_tran__bor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
// C = op (A', y): transpose A and apply cij = aij | y, using the GB_CAST_OP
// macro defined just above. Workspaces/A_slice partition the transpose work.
GrB_Info GB (_bind2nd_tran__bor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
// reference to the parser's depth counter being managed
unsigned &Depth;
// number of levels added through this object; undone in the destructor
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
// On scope exit, remove exactly the levels this object added.
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
// Enter one additional template parameter level.
void operator++() {
++Depth;
++AddedLevels;
}
// Enter D additional levels at once.
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
unsigned getDepth() const { return Depth; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope. The value combines a dependence bit (0x2) with a
/// no-space-before-'<' bit (0x1); higher values are better candidates.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
/// One suspicious '<': the candidate template-name expression, the '<'
/// location, and the bracket counts captured when it was seen (used to
/// decide whether we are still in the same bracket scope).
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
/// True if the parser is in exactly the bracket scope where this '<'
/// was recorded.
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
/// True if the parser is in that scope or any scope nested inside it.
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
// Same scope: keep whichever candidate ranks at least as high.
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// If the current token has the expected kind, consume it and return true;
/// otherwise leave the token stream untouched and return false.
bool TryConsumeToken(tok::TokenKind Expected) {
if (!Tok.is(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
// Matched: remember where it was and advance to the next token.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
/// As TryConsumeToken(Expected), but additionally reports the location of
/// the consumed token through \p Loc on success.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
bool Consumed = TryConsumeToken(Expected);
if (Consumed)
Loc = PrevTokLocation;
return Consumed;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
// Either swallow the completion token outright, or surface completion
// results for the current context before consuming it.
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
// Push both tokens back so the stream reads: Consumed, then Next.
PP.EnterToken(Consumed);
PP.Lex(Tok); // Tok is now Consumed.
PP.EnterToken(Next);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
// Leaving a paren scope invalidates any pending "missing template"
// candidates recorded at this or deeper nesting.
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
// Leaving a bracket scope invalidates any pending "missing template"
// candidates recorded at this or deeper nesting.
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
// Leaving a brace scope invalidates any pending "missing template"
// candidates recorded at this or deeper nesting.
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
// Only relevant when AltiVec or ZVector extensions are enabled.
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector)
return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
// Lazily look up the IdentifierInfo the first time this is asked.
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
// Parser state captured at construction, restored by Revert().
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
// True until Commit() or Revert() has been called.
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
// Snapshot everything that token consumption can change.
PrevPreferredType = P.PreferredType;
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
// Ask the preprocessor to start caching tokens so we can rewind later.
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
// Keep the consumed tokens; the tentatively-declared-identifier list is
// still rolled back to its snapshot.
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
// Rewind the token stream and restore all saved parser state.
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Get the innermost class whose definition is currently being parsed.
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
// True once Pop() has run; prevents double-popping in the destructor.
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Non-template state. LastParameterListWasEmpty is explicitly set to
  /// false here; previously it was left uninitialized, so reading it on a
  /// default-constructed object was undefined behavior.
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc(),
        LastParameterListWasEmpty(false) { }

  /// State for a template declaration or an explicit specialization.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  /// State for an explicit instantiation, optionally preceded by 'extern'.
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false) { }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Consume and store tokens up to the single token kind \p T1, by
/// delegating to the two-kind overload with both kinds set to \p T1.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// A ParsedAttributes list that additionally tracks the source range
/// covered by the attributes.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
/// Drop all attributes and reset the range to an invalid (default) one.
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
/// Source range spanned by the parsed attributes.
SourceRange Range;
};
/// A ParsedAttributesView that additionally tracks the source range
/// covered by the attributes.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
/// Clear only the list, then reset the range to an invalid (default) one.
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
/// Source range spanned by the parsed attributes.
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII data for parsing an Objective-C @implementation: registers itself
/// as the parser's current implementation context on construction and
/// collects method bodies whose parsing is delayed until the end of the
/// @implementation.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      // Initialize all scalar members in the init list; Finished was
      // previously assigned in the constructor body.
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  /// Set by finish(); queried via isFinished().
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
/// Convenience overload: looks up the innermost potential angle bracket
/// and defers to the two-argument form; trivially false when there is
/// no angle-bracket candidate open.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  auto *Info = AngleBrackets.getCurrent(*this);
  if (!Info)
    return false;
  return checkPotentialAngleBracketDelimiter(*Info, OpToken);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false);
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
///   assignment-expression
///   '{' ...
///
/// Dispatches to the brace-initializer parser when the next token is '{',
/// otherwise parses an assignment-expression.
ExprResult ParseInitializer() {
  if (Tok.is(tok::l_brace))
    return ParseBraceInitializer();
  return ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// How the body of this __if_exists or __if_not_exists block
/// should be treated (parsed, skipped, or treated as dependent).
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts restricted to a bare type-specifier.
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;
  // Full declaration-specifier contexts.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts where deduction is not permitted.
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  // Contexts where deduction is permitted.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;
  /// True when a for-range declaration (and therefore its ':') was seen.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
/// A ForRangeInit that additionally records the statement for the
/// loop variable of the for-range declaration.
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  return getLangOpts().CPlusPlus
             ? isCXXDeclarationSpecifier() == TPResult::True
             : isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  // C++ requires full tentative-parsing disambiguation.
  return isCXXDeclarationStatement();
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Notify Sema's OpenMP machinery before disambiguating — presumably so
  // loop state is set up first; see Sema::startOpenMPLoop.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  return getLangOpts().CPlusPlus
             ? isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true)
             : isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // C has no type-id/expression ambiguity here; a
    // specifier-qualifier-list settles it.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool Ambiguous;
  return isTypeIdInParens(Ambiguous);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool Discarded;
  return isCXXTypeId(TypeIdUnambiguous, Discarded);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  // Convenience overload that ignores the ambiguity out-parameter.
  bool Ignored = false;
  return isCXXTypeId(Context, Ignored);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  // True/False: ambiguity resolved; Ambiguous: more tentative parsing is
  // needed; Error: a parse error occurred during disambiguation.
  True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' can begin a C++11 attribute-specifier; a lone '[' (or
  // disabled [[]] support) means there is nothing to diagnose.
  if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
    return false;
  // Found '[[' in a position where attributes are prohibited.
  return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  // A misplaced attribute here is either '[[' or 'alignas'; anything else
  // is not an attribute-specifier, so no diagnostic is needed.
  if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
      Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  // An invalid range means no attributes were actually parsed.
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  // This overload owns the attributes; drop them entirely.
  Attrs.clear();
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  // View variant: detach the list only.  NOTE(review): clearListOnly
  // presumably leaves ownership of the attribute objects elsewhere — confirm.
  Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't supported yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  // __attribute__ introduces a GNU attribute list; otherwise do nothing.
  if (Tok.is(tok::kw___attribute)) {
    ParsedAttributes attrs(AttrFactory);
    SourceLocation endLoc;
    ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
    // Move the parsed attributes onto the declarator.
    D.takeAttributes(attrs, endLoc);
  }
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  // Parse directly into the caller's list when __attribute__ is present.
  if (Tok.is(tok::kw___attribute))
    ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
  // Parse [[...]] attributes (when enabled and present) onto the declarator.
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrs(AttrFactory);
    SourceLocation endLoc;
    ParseCXX11Attributes(attrs, &endLoc);
    D.takeAttributes(attrs, endLoc);
  }
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    // ParseCXX11Attributes requires the ranged variant; merge the result
    // back into the caller's plain list afterwards.
    ParsedAttributesWithRange attrsWithRange(AttrFactory);
    ParseCXX11Attributes(attrsWithRange, endLoc);
    attrs.takeAllFrom(attrsWithRange);
  }
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  // OuterMightBeMessageSend helps disambiguate '[[' from a nested
  // Objective-C message send.
  if (standardAttributesAllowed() &&
      isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  // Microsoft-style [attr] lists require MS extensions and a leading '['.
  if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  // __declspec(...) is consumed only when the DeclSpecKeyword language
  // option is enabled and the keyword is next in the token stream.
  if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (getLangOpts().OpenCL)
    return ParseOpenCLUnrollHintAttribute(Attrs);
  // Not OpenCL: nothing to parse, report success.
  return true;
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  // Convenience overload operating on the current token.
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;
  bool EnteredScope; // Sema entered the declarator scope; must exit later.
  bool CreatedScope; // EnterScope was called; dtor must call ExitScope.
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
      : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.
    // ActOnCXXEnterDeclaratorScope returning false means success; only then
    // do we owe a matching ActOnCXXExitDeclaratorScope in the destructor.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }
  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    // Exit the parser scope even if entering the declarator scope failed.
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
  AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
  AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
  AR_GNUAttributesParsed = 1 << 1, ///< GNU __attribute__ lists accepted.
  AR_CXX11AttributesParsed = 1 << 2, ///< C++11 [[...]] attributes accepted.
  AR_DeclspecAttributesParsed = 1 << 3, ///< __declspec attributes accepted.
  AR_AllAttributesParsed = AR_GNUAttributesParsed |
                           AR_CXX11AttributesParsed |
                           AR_DeclspecAttributesParsed,
  AR_VendorAttributesParsed = AR_GNUAttributesParsed |
                              AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
  SourceLocation NamespaceLoc; // location of the 'namespace' keyword
  SourceLocation InlineLoc;    // location of 'inline', if present
  SourceLocation IdentLoc;     // location of the namespace name
  IdentifierInfo *Ident;       // the namespace name itself
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
  SourceLocation TypenameLoc; // location of 'typename', if present
  CXXScopeSpec SS;            // nested-name-specifier preceding the name
  UnqualifiedId Name;         // the declared name itself
  SourceLocation EllipsisLoc; // location of '...' for pack expansions
  // Reset all fields so this declarator can be reused for the next entry.
  void clear() {
    TypenameLoc = EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
  Expr *TailExpr = nullptr;   // trailing expression after ':' — NOTE(review):
                              // presumably linear step / aligned alignment;
                              // confirm against ParseOpenMPVarList.
  SourceLocation ColonLoc;    // location of ':' in the clause, if any
  SourceLocation RLoc;        // location of the closing ')'
  CXXScopeSpec ReductionOrMapperIdScopeSpec;
  DeclarationNameInfo ReductionOrMapperId;
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
  OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
  SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
      MapTypeModifiers;
  SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
      MapTypeModifiersLoc;
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
  bool IsMapTypeImplicit = false; // map type was not written explicitly
  SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl();
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Arary and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
concurrent_unordered_map.cuh.h | /*
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CONCURRENT_UNORDERED_MAP_CUH
#define CONCURRENT_UNORDERED_MAP_CUH
#include <thrust/pair.h>
#include <cassert>
#include <iostream>
#include <iterator>
#include <type_traits>
#include "hash_functions.cuh"
#include "managed.cuh"
#include "managed_allocator.cuh"
// TODO: replace this with CUDA_TRY and propagate the error
#ifndef CUDA_RT_CALL
// Checks the result of a CUDA runtime call and aborts the process on failure.
// Wrapped in do { } while (0) so the macro expands to a single statement:
// the previous bare { } block followed by the caller's ';' formed an empty
// statement that broke unbraced if/else usage.  The argument is also
// parenthesized so expressions with commas/operators expand safely.
#define CUDA_RT_CALL(call)                                                    \
  do {                                                                        \
    cudaError_t cudaStatus = (call);                                          \
    if (cudaSuccess != cudaStatus) {                                          \
      fprintf(stderr,                                                         \
              "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with " \
              "%s (%d).\n",                                                   \
              #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus),      \
              cudaStatus);                                                    \
      exit(1);                                                                \
    }                                                                         \
  } while (0)
#endif
// TODO: can we do this more efficiently?
// Emulates an 8-bit atomicCAS on top of the native 32-bit atomicCAS.
//
// Bug fix: the original compared the *entire* 32-bit word against the shifted
// compare byte, so the CAS only succeeded when the three neighboring bytes
// happened to be zero, and it returned the raw word truncated to 8 bits
// instead of the addressed byte.  This version masks and shifts the target
// byte correctly and retries when a neighboring byte changes underneath us.
__inline__ __device__ int8_t atomicCAS(int8_t* address, int8_t compare,
                                       int8_t val) {
  // Word containing the byte, and the byte's bit offset within that word.
  int32_t* base_address = (int32_t*)((char*)address - ((size_t)address & 3));
  unsigned int shift = ((size_t)address & 3) * 8;
  unsigned int mask = 0xFFu << shift;
  unsigned int old = (unsigned int)*base_address;
  unsigned int assumed;
  do {
    assumed = old;
    // Behave like hardware CAS: fail when the addressed byte differs.
    if ((int8_t)((assumed & mask) >> shift) != compare) break;
    unsigned int replace =
        (assumed & ~mask) | (((unsigned int)(uint8_t)val << shift) & mask);
    old = (unsigned int)atomicCAS(base_address, (int32_t)assumed,
                                  (int32_t)replace);
    // Retry only when the word CAS failed because *another* byte moved.
  } while (assumed != old);
  return (int8_t)((old & mask) >> shift);
}
// TODO: can we do this more efficiently?
// Emulates a 16-bit atomicCAS on top of the native 32-bit atomicCAS.
//
// Bug fix (same defect as the 8-bit overload): the original compared the whole
// word against the shifted compare value and returned the unshifted word.
// Assumes `address` is 2-byte aligned, so the halfword sits at bit offset 0
// or 16 within its containing word.
__inline__ __device__ int16_t atomicCAS(int16_t* address, int16_t compare,
                                        int16_t val) {
  int32_t* base_address = (int32_t*)((char*)address - ((size_t)address & 2));
  unsigned int shift = ((size_t)address & 2) * 8;
  unsigned int mask = 0xFFFFu << shift;
  unsigned int old = (unsigned int)*base_address;
  unsigned int assumed;
  do {
    assumed = old;
    // Behave like hardware CAS: fail when the addressed halfword differs.
    if ((int16_t)((assumed & mask) >> shift) != compare) break;
    unsigned int replace =
        (assumed & ~mask) | (((unsigned int)(uint16_t)val << shift) & mask);
    old = (unsigned int)atomicCAS(base_address, (int32_t)assumed,
                                  (int32_t)replace);
    // Retry only when the word CAS failed because the *other* halfword moved.
  } while (assumed != old);
  return (int16_t)((old & mask) >> shift);
}
// 64-bit signed CAS, forwarded to the native unsigned-long-long overload
// (same width, bit pattern preserved by the casts).
__inline__ __device__ int64_t atomicCAS(int64_t* address, int64_t compare,
                                        int64_t val) {
  unsigned long long* const word = (unsigned long long*)address;
  const unsigned long long old_word =
      atomicCAS(word, (unsigned long long)compare, (unsigned long long)val);
  return (int64_t)old_word;
}
// 64-bit unsigned CAS, forwarded to the native unsigned-long-long overload.
__inline__ __device__ uint64_t atomicCAS(uint64_t* address, uint64_t compare,
                                         uint64_t val) {
  unsigned long long* const word = (unsigned long long*)address;
  const unsigned long long old_word =
      atomicCAS(word, (unsigned long long)compare, (unsigned long long)val);
  return (uint64_t)old_word;
}
// long long CAS; long long and unsigned long long are both 64 bits on CUDA
// targets, so forwarding through the native overload is lossless.
__inline__ __device__ long long int atomicCAS(long long int* address,
                                              long long int compare,
                                              long long int val) {
  const unsigned long long old_word =
      atomicCAS((unsigned long long*)address, (unsigned long long)compare,
                (unsigned long long)val);
  return (long long int)old_word;
}
// double CAS: reinterpret the operands as 64-bit integers, CAS those, and
// convert the previous bit pattern back to a double.
__inline__ __device__ double atomicCAS(double* address, double compare,
                                       double val) {
  const unsigned long long int old_bits =
      atomicCAS((unsigned long long int*)address,
                __double_as_longlong(compare), __double_as_longlong(val));
  return __longlong_as_double(old_bits);
}
// float CAS: reinterpret the operands as 32-bit integers, CAS those, and
// convert the previous bit pattern back to a float.
__inline__ __device__ float atomicCAS(float* address, float compare,
                                      float val) {
  const int old_bits =
      atomicCAS((int*)address, __float_as_int(compare), __float_as_int(val));
  return __int_as_float(old_bits);
}
// 64-bit signed add, forwarded to the native unsigned-long-long overload
// (two's-complement addition is bit-identical for signed and unsigned).
__inline__ __device__ int64_t atomicAdd(int64_t* address, int64_t val) {
  const unsigned long long old_word =
      atomicAdd((unsigned long long*)address, (unsigned long long)val);
  return (int64_t)old_word;
}
// 64-bit unsigned add, forwarded to the native unsigned-long-long overload.
__inline__ __device__ uint64_t atomicAdd(uint64_t* address, uint64_t val) {
  const unsigned long long old_word =
      atomicAdd((unsigned long long*)address, (unsigned long long)val);
  return (uint64_t)old_word;
}
// Loads a <key, value> pair with a single vector load when the pair's size
// matches a native vector width (16, 8, 4, or 2 bytes); falls back to a plain
// pair load otherwise.  The union reinterprets the loaded vector bits as the
// pair.
// NOTE(review): writing vec_val then reading pair_val is union type punning —
// accepted by nvcc/GCC/Clang but not strictly portable ISO C++.
// Assumes `ptr` satisfies the alignment of the chosen vector type — TODO
// confirm the allocator guarantees this.
template <typename pair_type>
__forceinline__ __device__ pair_type
load_pair_vectorized(const pair_type* __restrict__ const ptr) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    // 16-byte pair: one uint4 load.
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.vec_val = *reinterpret_cast<const uint4*>(ptr);
    return converter.pair_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    // 8-byte pair: one uint2 load.
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.vec_val = *reinterpret_cast<const uint2*>(ptr);
    return converter.pair_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    // 4-byte pair: one int load.
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const int*>(ptr);
    return converter.pair_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    // 2-byte pair: one short load.
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const short*>(ptr);
    return converter.pair_val;
  } else {
    // Odd-sized pair: no matching vector width, plain load.
    return *ptr;
  }
}
// Mirror of load_pair_vectorized: stores a <key, value> pair with a single
// vector store when the pair's size matches a native vector width (16, 8, 4,
// or 2 bytes); falls back to a plain pair store otherwise.
// NOTE(review): same union type-punning and alignment caveats as the load —
// TODO confirm the allocator guarantees vector alignment.
template <typename pair_type>
__forceinline__ __device__ void store_pair_vectorized(
    pair_type* __restrict__ const ptr, const pair_type val) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    // 16-byte pair: one uint4 store.
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint4*>(ptr) = converter.vec_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    // 8-byte pair: one uint2 store.
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint2*>(ptr) = converter.vec_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    // 4-byte pair: one int store.
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<int*>(ptr) = converter.vec_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    // 2-byte pair: one short store.
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<short*>(ptr) = converter.vec_val;
  } else {
    // Odd-sized pair: no matching vector width, plain store.
    *ptr = val;
  }
}
// Kernel: fill every slot of the table with the <unused_key, unused_value>
// sentinel pair.  One thread initializes one slot.
template <typename value_type, typename size_type, typename key_type,
          typename elem_type>
__global__ void init_hashtbl(
    value_type* __restrict__ const hashtbl_values, const size_type n,
    const key_type key_val, const elem_type elem_val) {
  const size_type tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= n) return;  // guard the tail of the last block
  store_pair_vectorized(hashtbl_values + tid,
                        thrust::make_pair(key_val, elem_val));
}
// Device-callable equivalent of std::equal_to<T>: compares two values with
// operator==.
template <typename T>
struct equal_to {
  using result_type = bool;
  using first_argument_type = T;
  using second_argument_type = T;
  __forceinline__ __host__ __device__ constexpr bool operator()(
      const first_argument_type& a, const second_argument_type& b) const {
    return a == b;
  }
};
/**
 * Iterator adapter over [begin, end) that wraps around to begin when
 * incremented past the last element (used to linear-probe the circular
 * hash table).
 *
 * Fixes over the previous version:
 *  - The postfix operator++ overloads declared a *reference* return type and
 *    returned a local copy — a dangling reference.  They now return by value,
 *    the canonical postfix signature.
 *  - The const overloads of operator++ modify m_current; the member is now
 *    mutable so those overloads are well-formed when instantiated.
 *  - The constructor's initializer list now matches declaration order
 *    (silences -Wreorder; no behavioral change, the initializers are
 *    independent).
 */
template <typename Iterator>
class cycle_iterator_adapter {
 public:
  using value_type = typename std::iterator_traits<Iterator>::value_type;
  using difference_type =
      typename std::iterator_traits<Iterator>::difference_type;
  using pointer = typename std::iterator_traits<Iterator>::pointer;
  using reference = typename std::iterator_traits<Iterator>::reference;
  using iterator_type = Iterator;

  cycle_iterator_adapter() = delete;

  __host__ __device__ explicit cycle_iterator_adapter(
      const iterator_type& begin, const iterator_type& end,
      const iterator_type& current)
      : m_current(current), m_begin(begin), m_end(end) {}

  // Prefix increment: advance one slot, wrapping from end back to begin.
  __host__ __device__ cycle_iterator_adapter& operator++() {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  __host__ __device__ const cycle_iterator_adapter& operator++() const {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  // Postfix increment: advance, but return the pre-increment position by
  // value (returning a reference to the local copy would dangle).
  __host__ __device__ cycle_iterator_adapter operator++(int) {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  __host__ __device__ cycle_iterator_adapter operator++(int) const {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  // Equal only when both the position and the wrapped range match.
  __host__ __device__ bool equal(
      const cycle_iterator_adapter<iterator_type>& other) const {
    return m_current == other.m_current && m_begin == other.m_begin &&
           m_end == other.m_end;
  }

  __host__ __device__ reference& operator*() { return *m_current; }
  __host__ __device__ const reference& operator*() const { return *m_current; }
  __host__ __device__ const pointer operator->() const {
    return m_current.operator->();
  }
  __host__ __device__ pointer operator->() { return m_current; }

  // Exposes the raw underlying iterator (used by accum()).
  __host__ __device__ iterator_type getter() const { return m_current; }

 private:
  // mutable: the const operator++ overloads above advance the position.
  mutable iterator_type m_current;
  iterator_type m_begin;
  iterator_type m_end;
};
// Two cycle iterators are equal when they wrap the same range and point at
// the same element (delegates to the member equal()).
template <class T>
__host__ __device__ bool operator==(const cycle_iterator_adapter<T>& a,
                                    const cycle_iterator_adapter<T>& b) {
  return a.equal(b);
}
// Negation of operator== above.
template <class T>
__host__ __device__ bool operator!=(const cycle_iterator_adapter<T>& a,
                                    const cycle_iterator_adapter<T>& b) {
  return !(a.equal(b));
}
/**
 * Does support concurrent insert, but not concurrent insert and probing.
 *
 * TODO:
 * - add constructor that takes pointer to hash_table to avoid allocations
 * - extend interface to accept streams
 */
/**
 * GPU hash map with open addressing and linear probing.
 *
 * - Slots whose key equals the compile-time sentinel `unused_key` are empty.
 * - Storage comes from `Allocator` (managed memory by default), so the table
 *   is reachable from both host and device.
 * - Device-side concurrent insert is supported; concurrent insert + probe is
 *   not (see the comment above this class).
 */
template <typename Key, typename Element, Key unused_key,
          typename Hasher = default_hash<Key>,
          typename Equality = equal_to<Key>,
          typename Allocator = managed_allocator<thrust::pair<Key, Element>>,
          bool count_collisions = false>
class concurrent_unordered_map : public managed {
 public:
  using size_type = size_t;
  using hasher = Hasher;
  using key_equal = Equality;
  using allocator_type = Allocator;
  using key_type = Key;
  using value_type = thrust::pair<Key, Element>;
  using mapped_type = Element;
  using iterator = cycle_iterator_adapter<value_type*>;
  using const_iterator = const cycle_iterator_adapter<value_type*>;

 private:
  // Used by the (currently commented-out) packed-pair insert path to CAS a
  // whole <key, value> pair as one 64-bit word.
  union pair2longlong {
    unsigned long long int longlong;
    value_type pair;
  };

 public:
  concurrent_unordered_map(const concurrent_unordered_map&) = delete;
  concurrent_unordered_map& operator=(const concurrent_unordered_map&) = delete;

  /**
   * Allocates a table of n slots and launches init_hashtbl to fill every slot
   * with <unused_key, unused_element>; synchronizes before returning.
   *
   * NOTE(review): the member-initializer list is not in declaration order
   * (m_unused_element is declared before m_allocator but listed last).
   * Members are initialized in declaration order regardless and no
   * initializer depends on another, so this is only a -Wreorder warning.
   */
  explicit concurrent_unordered_map(size_type n,
                                    const mapped_type unused_element,
                                    const Hasher& hf = hasher(),
                                    const Equality& eql = key_equal(),
                                    const allocator_type& a = allocator_type())
      : m_hf(hf),
        m_equal(eql),
        m_allocator(a),
        m_hashtbl_size(n),
        m_hashtbl_capacity(n),
        m_collisions(0),
        m_unused_element(
            unused_element) { // allocate the raw data of hash table:
    // m_hashtbl_values,pre-alloc it on current GPU if UM.
    m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
    constexpr int block_size = 128;
    {
      cudaPointerAttributes hashtbl_values_ptr_attributes;
      cudaError_t status = cudaPointerGetAttributes(
          &hashtbl_values_ptr_attributes, m_hashtbl_values);
      // Prefetch only when the allocation is managed memory; the attribute
      // field name changed in CUDA 10.
#if CUDART_VERSION >= 10000
      if (cudaSuccess == status &&
          hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
      if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
      {
        int dev_id = 0;
        CUDA_RT_CALL(cudaGetDevice(&dev_id));
        CUDA_RT_CALL(cudaMemPrefetchAsync(
            m_hashtbl_values, m_hashtbl_size * sizeof(value_type), dev_id, 0));
      }
    }
    // Initialize kernel, set all entry to unused <K,V>
    init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size>>>(
        m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element);
    // CUDA_RT_CALL( cudaGetLastError() );
    CUDA_RT_CALL(cudaStreamSynchronize(0));
    CUDA_RT_CALL(cudaGetLastError());
  }

  ~concurrent_unordered_map() {
    m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
  }

  // Iterators over the full slot array (begin == first slot, end == one past
  // the last slot in the cycle adapter's terms).
  __host__ __device__ iterator begin() {
    return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                    m_hashtbl_values);
  }
  __host__ __device__ const_iterator begin() const {
    return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                          m_hashtbl_values);
  }
  __host__ __device__ iterator end() {
    return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                    m_hashtbl_values + m_hashtbl_size);
  }
  __host__ __device__ const_iterator end() const {
    return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                          m_hashtbl_values + m_hashtbl_size);
  }
  // Number of slots (not number of occupied entries).
  __host__ __device__ size_type size() const { return m_hashtbl_size; }
  __host__ __device__ value_type* data() const { return m_hashtbl_values; }
  __forceinline__ static constexpr __host__ __device__ key_type
  get_unused_key() {
    return unused_key;
  }

  // Generic update of a hash table value for any aggregator
  template <typename aggregation_type>
  __forceinline__ __device__ void update_existing_value(
      mapped_type& existing_value, value_type const& insert_pair,
      aggregation_type) {
    // update without CAS
    existing_value = insert_pair.second;
  }

  // Element-wise atomic accumulate into an existing value.
  // NOTE(review): only compiles when mapped_type exposes a .data array of a
  // type with an atomicAdd overload — confirm against the instantiating
  // caller.
  __forceinline__ __device__ void accum_existing_value_atomic(
      mapped_type& existing_value, value_type const& accum_pair) {
    // update with CAS
    // existing_value = insert_pair.second;
    int num_element =
        sizeof(existing_value.data) / sizeof(*(existing_value.data));
    const mapped_type& accumulator = accum_pair.second;
    for (int i = 0; i < num_element; i++) {
      atomicAdd(existing_value.data + i, accumulator.data[i]);
    }
    // atomicAdd(&existing_value, double val)
  }

  // TODO Overload atomicAdd for 1 byte and 2 byte types, until then, overload
  // specifically for the
  // types where atomicAdd already has an overload. Otherwise the generic
  // update_existing_value will
  // be used. Specialization for COUNT aggregator
  /*
  __forceinline__ __host__ __device__
  void update_existing_value(mapped_type & existing_value, value_type const &
  insert_pair,
  count_op<int32_t> op)
  {
  atomicAdd(&existing_value, static_cast<mapped_type>(1));
  }
  // Specialization for COUNT aggregator
  __forceinline__ __host__ __device__
  void update_existing_value(mapped_type & existing_value, value_type const &
  insert_pair,
  count_op<int64_t> op)
  {
  atomicAdd(&existing_value, static_cast<mapped_type>(1));
  }
  // Specialization for COUNT aggregator
  __forceinline__ __host__ __device__
  void update_existing_value(mapped_type & existing_value, value_type const &
  insert_pair,
  count_op<float> op)
  {
  atomicAdd(&existing_value, static_cast<mapped_type>(1));
  }
  // Specialization for COUNT aggregator
  __forceinline__ __host__ __device__
  void update_existing_value(mapped_type & existing_value, value_type const &
  insert_pair,
  count_op<double> op)
  {
  atomicAdd(&existing_value, static_cast<mapped_type>(1));
  }
  */

  /* --------------------------------------------------------------------------*/
  /**
   * @Synopsis Inserts a new (key, value) pair. If the key already exists in
   the map
                an aggregation operation is performed with the new value and
   existing value.
                E.g., if the aggregation operation is 'max', then the maximum is
   computed
                between the new value and existing value and the result is
   stored in the map.
   *
   * @Param[in] x The new (key, value) pair to insert
   * @Param[in] op The aggregation operation to perform
   * @Param[in] keys_equal An optional functor for comparing two keys
   * @Param[in] precomputed_hash Indicates if a precomputed hash value is being
   passed in to use
   * to determine the write location of the new key
   * @Param[in] precomputed_hash_value The precomputed hash value
   * @tparam aggregation_type A functor for a binary operation that performs the
   aggregation
   * @tparam comparison_type A functor for comparing two keys
   *
   * @Returns An iterator to the newly inserted key,value pair
   */
  /* ----------------------------------------------------------------------------*/
  template <typename aggregation_type, class comparison_type = key_equal,
            typename hash_value_type = typename Hasher::result_type>
  __forceinline__ __device__ iterator insert(
      const value_type& x, aggregation_type op,
      comparison_type keys_equal = key_equal(), bool precomputed_hash = false,
      hash_value_type precomputed_hash_value = 0) {
    const size_type hashtbl_size = m_hashtbl_size;
    value_type* hashtbl_values = m_hashtbl_values;
    hash_value_type hash_value{0};
    // If a precomputed hash value has been passed in, then use it to determine
    // the write location of the new key
    if (true == precomputed_hash) {
      hash_value = precomputed_hash_value;
    }
    // Otherwise, compute the hash value from the new key
    else {
      hash_value = m_hf(x.first);
    }
    size_type current_index = hash_value % hashtbl_size;
    value_type* current_hash_bucket = &(hashtbl_values[current_index]);
    const key_type insert_key = x.first;
    bool insert_success = false;
    size_type counter = 0;
    while (false == insert_success) {
      // Give up after probing every slot once: the table is full.
      if (counter++ >= hashtbl_size) {
        return end();
      }
      key_type& existing_key = current_hash_bucket->first;
      mapped_type& existing_value = current_hash_bucket->second;
      // Try and set the existing_key for the current hash bucket to insert_key
      const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key);
      // If old_key == unused_key, the current hash bucket was empty
      // and existing_key was updated to insert_key by the atomicCAS.
      // If old_key == insert_key, this key has already been inserted.
      // In either case, perform the atomic aggregation of existing_value and
      // insert_value
      // Because the hash table is initialized with the identity value of the
      // aggregation
      // operation, it is safe to perform the operation when the existing_value
      // still
      // has its initial value
      // TODO: Use template specialization to make use of native atomic
      // functions
      // TODO: How to handle data types less than 32 bits?
      if (keys_equal(unused_key, old_key) || keys_equal(insert_key, old_key)) {
        update_existing_value(existing_value, x, op);
        insert_success = true;
      }
      current_index = (current_index + 1) % hashtbl_size;
      current_hash_bucket = &(hashtbl_values[current_index]);
    }
    return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size,
                    current_hash_bucket);
  }

  /* This function is not currently implemented
  __forceinline__
  __host__ __device__ iterator insert(const value_type& x)
  {
  const size_type hashtbl_size = m_hashtbl_size;
  value_type* hashtbl_values = m_hashtbl_values;
  const size_type key_hash = m_hf( x.first );
  size_type hash_tbl_idx = key_hash%hashtbl_size;
  value_type* it = 0;
  while (0 == it) {
  value_type* tmp_it = hashtbl_values + hash_tbl_idx;
  #ifdef __CUDA_ARCH__
  if ( std::numeric_limits<key_type>::is_integer &&
  std::numeric_limits<mapped_type>::is_integer && sizeof(unsigned long long int)
  == sizeof(value_type)
  )
  {
  pair2longlong converter = {0ull};
  converter.pair = thrust::make_pair( unused_key, m_unused_element
  );
  const unsigned long long int unused = converter.longlong;
  converter.pair = x;
  const unsigned long long int value = converter.longlong;
  const unsigned long long int old_val = atomicCAS(
  reinterpret_cast<unsigned long long
  int*>(tmp_it), unused, value ); if ( old_val == unused ) { it = tmp_it;
  }
  else if ( count_collisions )
  {
  atomicAdd( &m_collisions, 1 );
  }
  } else {
  const key_type old_key = atomicCAS( &(tmp_it->first), unused_key,
  x.first );
  if ( m_equal( unused_key, old_key ) ) {
  (m_hashtbl_values+hash_tbl_idx)->second = x.second;
  it = tmp_it;
  }
  else if ( count_collisions )
  {
  atomicAdd( &m_collisions, 1 );
  }
  }
  #else
  #pragma omp critical
  {
  if ( m_equal( unused_key, tmp_it->first ) ) {
  hashtbl_values[hash_tbl_idx] = thrust::make_pair( x.first,
  x.second );
  it = tmp_it;
  }
  }
  #endif
  hash_tbl_idx = (hash_tbl_idx+1)%hashtbl_size;
  }
  return iterator( m_hashtbl_values,m_hashtbl_values+hashtbl_size,it);
  }
  */

  // Linear-probe lookup.  Returns end() when an empty slot is reached (key
  // absent) or after scanning the whole table.
  __forceinline__ __host__ __device__ const_iterator
  find(const key_type& k) const {
    size_type key_hash = m_hf(k);
    size_type hash_tbl_idx = key_hash % m_hashtbl_size;
    value_type* begin_ptr = 0;
    size_type counter = 0;
    while (0 == begin_ptr) {
      value_type* tmp_ptr = m_hashtbl_values + hash_tbl_idx;
      const key_type tmp_val = tmp_ptr->first;
      if (m_equal(k, tmp_val)) {
        begin_ptr = tmp_ptr;
        break;
      }
      // Empty slot => key not present; the counter guard bounds the scan
      // (NOTE(review): `>` allows one extra probe beyond a full pass).
      if (m_equal(unused_key, tmp_val) || counter > m_hashtbl_size) {
        begin_ptr = m_hashtbl_values + m_hashtbl_size;
        break;
      }
      hash_tbl_idx = (hash_tbl_idx + 1) % m_hashtbl_size;
      ++counter;
    }
    return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                          begin_ptr);
  }

  // Combined get-or-insert: returns an iterator to the slot for key k,
  // assigning it a fresh value from *value_counter on first insertion.
  template <typename aggregation_type, typename counter_type,
            class comparison_type = key_equal,
            typename hash_value_type = typename Hasher::result_type>
  __forceinline__ __device__ iterator get_insert(
      const key_type& k, aggregation_type op, counter_type* value_counter,
      comparison_type keys_equal = key_equal(), bool precomputed_hash = false,
      hash_value_type precomputed_hash_value = 0) {
    const size_type hashtbl_size = m_hashtbl_size;
    value_type* hashtbl_values = m_hashtbl_values;
    hash_value_type hash_value{0};
    // If a precomputed hash value has been passed in, then use it to determine
    // the write location of the new key
    if (true == precomputed_hash) {
      hash_value = precomputed_hash_value;
    }
    // Otherwise, compute the hash value from the new key
    else {
      hash_value = m_hf(k);
    }
    size_type current_index = hash_value % hashtbl_size;
    value_type* current_hash_bucket = &(hashtbl_values[current_index]);
    const key_type insert_key = k;
    bool insert_success = false;
    size_type counter = 0;
    while (false == insert_success) {
      // Situation #5: No slot: All slot in the hashtable is occupied by other
      // key, both get and
      // insert fail. Return empty iterator
      if (counter++ >= hashtbl_size) {
        return end();
      }
      key_type& existing_key = current_hash_bucket->first;
      volatile mapped_type& existing_value = current_hash_bucket->second;
      // Try and set the existing_key for the current hash bucket to insert_key
      const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key);
      // If old_key == unused_key, the current hash bucket was empty
      // and existing_key was updated to insert_key by the atomicCAS.
      // If old_key == insert_key, this key has already been inserted.
      // In either case, perform the atomic aggregation of existing_value and
      // insert_value
      // Because the hash table is initialized with the identity value of the
      // aggregation
      // operation, it is safe to perform the operation when the existing_value
      // still
      // has its initial value
      // TODO: Use template specialization to make use of native atomic
      // functions
      // TODO: How to handle data types less than 32 bits?
      // Situation #1: Empty slot: this key never exist in the table, ready to
      // insert.
      if (keys_equal(unused_key, old_key)) {
        // update_existing_value(existing_value, x, op);
        existing_value = (mapped_type)(atomicAdd(value_counter, 1));
        break;
      } // Situation #2+#3: Target slot: This slot is the slot for this key
      else if (keys_equal(insert_key, old_key)) {
        // NOTE(review): spin on a volatile read until the inserting thread
        // publishes the value; relies on volatile for visibility with no
        // memory fence — confirm this is sufficient on target architectures.
        while (existing_value == m_unused_element) {
          // Situation #2: This slot is inserting by another CUDA thread and the
          // value is not yet
          // ready, just wait
        }
        // Situation #3: This slot is already ready, get successfully and return
        // (iterator of) the
        // value
        break;
      }
      // Situation 4: Wrong slot: This slot is occupied by other key, get fail,
      // do nothing and
      // linear probing to next slot.
      current_index = (current_index + 1) % hashtbl_size;
      current_hash_bucket = &(hashtbl_values[current_index]);
    }
    return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size,
                    current_hash_bucket);
  }

  // Copies another map's contents into this one (reallocating if this map's
  // capacity is too small); the memcpy is asynchronous on `stream`.
  int assign_async(const concurrent_unordered_map& other,
                   cudaStream_t stream = 0) {
    m_collisions = other.m_collisions;
    if (other.m_hashtbl_size <= m_hashtbl_capacity) {
      m_hashtbl_size = other.m_hashtbl_size;
    } else {
      m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
      m_hashtbl_capacity = other.m_hashtbl_size;
      m_hashtbl_size = other.m_hashtbl_size;
      m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
    }
    CUDA_RT_CALL(cudaMemcpyAsync(m_hashtbl_values, other.m_hashtbl_values,
                                 m_hashtbl_size * sizeof(value_type),
                                 cudaMemcpyDefault, stream));
    return 0;
  }

  // Re-fills every slot with the unused sentinel pair (asynchronous launch).
  void clear_async(cudaStream_t stream = 0) {
    constexpr int block_size = 128;
    init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size, 0,
                   stream>>>(m_hashtbl_values, m_hashtbl_size, unused_key,
                             m_unused_element);
    if (count_collisions) m_collisions = 0;
  }

  unsigned long long get_num_collisions() const { return m_collisions; }

  // Debug helper: dumps the first five slots to stdout (host only).
  void print() {
    for (size_type i = 0; i < 5; ++i) {
      std::cout << i << ": " << m_hashtbl_values[i].first << ","
                << m_hashtbl_values[i].second << std::endl;
    }
  }

  // Prefetches the slot array (when managed) and this object to `dev_id`.
  int prefetch(const int dev_id, cudaStream_t stream = 0) {
    cudaPointerAttributes hashtbl_values_ptr_attributes;
    cudaError_t status = cudaPointerGetAttributes(
        &hashtbl_values_ptr_attributes, m_hashtbl_values);
#if CUDART_VERSION >= 10000
    if (cudaSuccess == status &&
        hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
    if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
    {
      CUDA_RT_CALL(cudaMemPrefetchAsync(m_hashtbl_values,
                                        m_hashtbl_size * sizeof(value_type),
                                        dev_id, stream));
    }
    CUDA_RT_CALL(cudaMemPrefetchAsync(this, sizeof(*this), dev_id, stream));
    return 0;
  }

  // Atomically accumulates x.second into the existing value for x.first;
  // returns end() when the key is not present.
  template <class comparison_type = key_equal,
            typename hash_value_type = typename Hasher::result_type>
  __forceinline__ __device__ const_iterator
  accum(const value_type& x, comparison_type keys_equal = key_equal(),
        bool precomputed_hash = false,
        hash_value_type precomputed_hash_value = 0) {
    const key_type& dst_key = x.first;
    auto it = find(dst_key);
    if (it == end()) {
      return it;
    }
    value_type* dst = it.getter();
    accum_existing_value_atomic(dst->second, x);
    return it;
  }

 private:
  const hasher m_hf;        // hash functor
  const key_equal m_equal;  // key comparison functor
  // Sentinel value meaning "slot claimed but value not yet written".
  const mapped_type m_unused_element;
  allocator_type m_allocator;
  size_type m_hashtbl_size;      // logical number of slots
  size_type m_hashtbl_capacity;  // allocated number of slots (>= size)
  value_type* m_hashtbl_values;  // the slot array
  // Probe-collision counter; only maintained when count_collisions == true.
  unsigned long long m_collisions;
};
#endif // CONCURRENT_UNORDERED_MAP_CUH
|
simple.c | // RUN: %libomp-compile
// RUN: env OMP_DISPLAY_AFFINITY=false %libomp-run | %python %S/check.py -c 'NOTHING' %s
// RUN: env OMP_DISPLAY_AFFINITY=true OMP_NUM_THREADS=1 %libomp-run | %python %S/check.py -c 'CHECK' %s
// RUN: env OMP_DISPLAY_AFFINITY=true OMP_NUM_THREADS=2 %libomp-run | %python %S/check.py -c 'CHECK-2' %s
// RUN: env OMP_DISPLAY_AFFINITY=true OMP_NUM_THREADS=3 %libomp-run | %python %S/check.py -c 'CHECK-3' %s
// RUN: env OMP_DISPLAY_AFFINITY=true OMP_NUM_THREADS=4 %libomp-run | %python %S/check.py -c 'CHECK-4' %s
// RUN: env OMP_DISPLAY_AFFINITY=true OMP_NUM_THREADS=8 %libomp-run | %python %S/check.py -c 'CHECK-8' %s
// REQUIRES: !abt
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, char** argv) {
  // Fixed format so each thread's affinity banner is easy to match in the
  // CHECK lines below (%L nesting level, %n thread number, %N team size).
  omp_set_affinity_format("TESTER: tl:%L tn:%n nt:%N");
  // First parallel region: with OMP_DISPLAY_AFFINITY=true each thread prints
  // one banner here.
#pragma omp parallel
  { }
  // NOTE(review): second region presumably checks that banners are not
  // re-printed for an unchanged team — confirm against check.py's counting.
#pragma omp parallel
  { }
  return 0;
}
// NOTHING: NO_OUTPUT
// CHECK: num_threads=1 TESTER: tl:1 tn:0 nt:1
// CHECK-2: num_threads=2 TESTER: tl:1 tn:[01] nt:2
// CHECK-3: num_threads=3 TESTER: tl:1 tn:[0-2] nt:3
// CHECK-4: num_threads=4 TESTER: tl:1 tn:[0-3] nt:4
// CHECK-8: num_threads=8 TESTER: tl:1 tn:[0-7] nt:8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.