source
stringlengths
3
92
c
stringlengths
26
2.25M
c_timers.c
#include "wtime.h"
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* Prototype */
void wtime( double * );

/*****************************************************************/
/******            E L A P S E D _ T I M E              ******/
/*****************************************************************/
/* Return a wall-clock timestamp in seconds.  The absolute value is
 * meaningless; only differences between two calls are. */
double elapsed_time( void )
{
#if defined(_OPENMP) && (_OPENMP > 200010)
    /* Prefer the OpenMP wall-clock when a recent runtime provides it. */
    return( omp_get_wtime() );
#else
    double seconds;
    wtime( &seconds );
    return( seconds );
#endif
}

/* Per-timer stopwatch state: up to 64 independent accumulating timers.
 * Under OpenMP each thread gets its own private copies. */
static double start[64], elapsed[64];
#ifdef _OPENMP
#pragma omp threadprivate(start, elapsed)
#endif

/*****************************************************************/
/******            T I M E R _ C L E A R                ******/
/*****************************************************************/
/* Reset timer n's accumulated time to zero. */
void timer_clear( int n )
{
    elapsed[n] = 0.0;
}

/*****************************************************************/
/******            T I M E R _ S T A R T                ******/
/*****************************************************************/
/* Record the starting timestamp for timer n. */
void timer_start( int n )
{
    start[n] = elapsed_time();
}

/*****************************************************************/
/******            T I M E R _ S T O P                  ******/
/*****************************************************************/
/* Accumulate the interval since the matching timer_start(n), so
 * repeated start/stop pairs sum their durations. */
void timer_stop( int n )
{
    elapsed[n] += elapsed_time() - start[n];
}

/*****************************************************************/
/******            T I M E R _ R E A D                  ******/
/*****************************************************************/
/* Return the total time accumulated on timer n, in seconds. */
double timer_read( int n )
{
    return( elapsed[n] );
}
GB_binop__bclr_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bclr_int32) // A.*B function (eWiseMult): GB (_AemultB_01__bclr_int32) // A.*B function (eWiseMult): GB (_AemultB_02__bclr_int32) // A.*B function (eWiseMult): GB (_AemultB_03__bclr_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bclr_int32) // C+=b function (dense accum): GB (_Cdense_accumb__bclr_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int32) // C=scalar+B GB (_bind1st__bclr_int32) // C=scalar+B' GB (_bind1st_tran__bclr_int32) // C=A+scalar GB (_bind2nd__bclr_int32) // C=A'+scalar GB (_bind2nd_tran__bclr_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_BITCLR (aij, bij, int32_t, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITCLR (x, y, int32_t, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_INT32 || GxB_NO_BCLR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bclr_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bclr_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bclr_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bclr_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bclr_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bclr_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bclr_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bclr_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bclr_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITCLR (x, bij, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bclr_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITCLR (aij, y, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (x, aij, int32_t, 32) ; \ } GrB_Info GB (_bind1st_tran__bclr_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (aij, y, int32_t, 32) ; \ } GrB_Info GB (_bind2nd_tran__bclr_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-499,512)),ceild(8*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(8*t1+Nx+7,512)),floord(16*t2+Nx+3,512)),floord(8*t3+Nx-5,512)),floord(16*t1-16*t2+Nz+Nx+5,512));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),128*t4+126);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(512*t4,4*t5+4); ubv=min(512*t4+511,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ 
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); 
free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
md5_bmark.c
/* * MD5 Benchmark * ------------- * File: md5_bmark.c * * This is the main file for the md5 benchmark kernel. This benchmark was * written as part of the StarBENCH benchmark suite at TU Berlin. It performs * MD5 computation on a number of self-generated input buffers in parallel, * automatically measuring execution time. * * Copyright (C) 2011 Michael Andersch * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <unistd.h> #include <string.h> #include <sys/time.h> #include <omp.h> #include "md5.h" #include "md5_bmark.h" typedef struct timeval timer; #define TIME(x) gettimeofday(&x, NULL) /* Function declarations */ int initialize(md5bench_t* args); int finalize(md5bench_t* args); void run(md5bench_t* args); void process(uint8_t* in, uint8_t* out, int bufsize); void listInputs(); long timediff(timer* starttime, timer* finishtime); // Input configurations static data_t datasets[] = { {64, 512, 0}, {64, 1024, 0}, {64, 2048, 0}, {64, 4096, 0}, {128, 1024*512, 1}, {128, 1024*1024, 1}, {128, 1024*2048, 1}, {128, 1024*4096, 1}, }; /* * Function: initialize * -------------------- * To initialize the benchmark parameters. Generates the input buffers from random data. 
*/ int initialize(md5bench_t* args) { int index = args->input_set; if(index < 0 || index >= sizeof(datasets)/sizeof(datasets[0])) { fprintf(stderr, "Invalid input set specified! Clamping to set 0\n"); index = 0; } args->numinputs = datasets[index].numbufs; args->size = datasets[index].bufsize; args->inputs = (uint8_t**)calloc(args->numinputs, sizeof(uint8_t*)); args->out = (uint8_t*)calloc(args->numinputs, DIGEST_SIZE); if(args->inputs == NULL || args->out == NULL) { fprintf(stderr, "Memory Allocation Error\n"); return -1; } //fprintf(stderr, "Reading input set: %d buffers, %d bytes per buffer\n", datasets[index].numbufs, datasets[index].bufsize); // Now the input buffers need to be generated, for replicability, use same seed srand(datasets[index].rseed); for(int i = 0; i < args->numinputs; i++) { args->inputs[i] = (uint8_t*)malloc(sizeof(uint8_t)*datasets[index].bufsize); uint8_t* p = args->inputs[i]; if(p == NULL) { fprintf(stderr, "Memory Allocation Error\n"); return -1; } for(int j = 0; j < datasets[index].bufsize; j++) *p++ = rand() % 255; } return 0; } /* * Function: process * ----------------- * Processes one input buffer, delivering the digest into out. */ void process(uint8_t* in, uint8_t* out, int bufsize) { MD5_CTX context; uint8_t digest[16]; #pragma omp parallel { #pragma omp task MD5_Init(&context); #pragma omp task MD5_Update(&context, in, bufsize); #pragma omp task MD5_Final(digest, &context); } memcpy(out, digest, DIGEST_SIZE); } /* * Function: run * -------------------- * Main benchmarking function. If called, processes buffers with MD5 * until no more buffers available. The resulting message digests * are written into consecutive locations in the preallocated output * buffer. 
*/
/* Runs the benchmark workload: one OpenMP task per iteration, each task
 * digesting every input buffer in order via process(). */
void run(md5bench_t* args) {
#pragma omp parallel
    {
#pragma omp single nowait
        {
            for(int i = 0; i < args->iterations; i++) {
#pragma omp task
                {
                    int buffers_to_process = args->numinputs;
                    int next = 0;
                    uint8_t** in = args->inputs;
                    uint8_t* out = args->out;
                    /* NOTE(review): every iteration's task writes the same
                     * out locations (out + next*DIGEST_SIZE).  All tasks
                     * compute identical digests, so the final contents are
                     * deterministic, but the concurrent writes are formally
                     * a race — confirm this is intended benchmark design. */
                    while(buffers_to_process > 0) {
                        process(in[next], out+next*DIGEST_SIZE, args->size);
                        next++;
                        buffers_to_process--;
                    }
                }
            }
        }
    }
}

/*
 * Function: finalize
 * ------------------
 * Cleans up memory used by the benchmark for input and output buffers.
 */
int finalize(md5bench_t* args) {
    char buffer[64];
    int offset = 0;  /* NOTE(review): unused local */
    for(int i = 0; i < args->numinputs; i++) {
#ifdef DEBUG
        sprintf(buffer, "Buffer %d has checksum ", i);
        fwrite(buffer, sizeof(char), strlen(buffer)+1, stdout);
#endif
        /* Render the 16-byte digest as 32 hex characters.
         * NOTE(review): this prints (byte & 0xf) then (byte & 0xf0) with
         * "%x"; the second sprintf can emit two chars at buffer+j+1 and is
         * partly overwritten on the next pass.  Nibble order/format looks
         * wrong (conventional form is "%02x" per byte), but the resulting
         * string is the program's observable checksum output, so the
         * behavior is preserved byte-for-byte here. */
        for(int j = 0; j < DIGEST_SIZE*2; j+=2) {
            sprintf(buffer+j, "%x", args->out[DIGEST_SIZE*i+j/2] & 0xf);
            sprintf(buffer+j+1, "%x", args->out[DIGEST_SIZE*i+j/2] & 0xf0);
        }
        buffer[32] = '\0';
#ifdef DEBUG
        fwrite(buffer, sizeof(char), 32, stdout);
        fputc('\n', stdout);
#else
        printf("%s ", buffer);
#endif
    }
#ifndef DEBUG
    printf("\n");
#endif
    /* Release each input buffer, then the pointer tables themselves. */
    if(args->inputs) {
        for(int i = 0; i < args->numinputs; i++) {
            if(args->inputs[i]) free(args->inputs[i]);
        }
        free(args->inputs);
    }
    if(args->out) free(args->out);
    return 0;
}

/*
 * Function: timediff
 * ------------------
 * Compute the difference between timers starttime and finishtime in msecs.
*/ long timediff(timer* starttime, timer* finishtime) { long msec; msec=(finishtime->tv_sec-starttime->tv_sec)*1000; msec+=(finishtime->tv_usec-starttime->tv_usec)/1000; return msec; } /** MAIN **/ int main(int argc, char** argv) { timer b_start, b_end; md5bench_t args; //nt = number of threads int nt; //Receber parâmetros scanf("%d", &nt); scanf("%d", &args.input_set); scanf("%d", &args.iterations); args.outflag = 1; omp_set_num_threads(nt); // Parameter initialization if(initialize(&args)) { fprintf(stderr, "Initialization Error\n"); exit(EXIT_FAILURE); } TIME(b_start); run(&args); TIME(b_end); // Free memory if(finalize(&args)) { fprintf(stderr, "Finalization Error\n"); exit(EXIT_FAILURE); } double b_time = (double)timediff(&b_start, &b_end)/1000; printf("%.3f\n", b_time); return 0; } /* # Results ## System Info Operating System: 3.19.0-58-generic NAME="Ubuntu" VERSION="14.04.4 LTS, Trusty Tahr" ID=ubuntu ID_LIKE=debian PRETTY_NAME="Ubuntu 14.04.4 LTS" VERSION_ID="14.04" CPU Name: 4th generation Intel(R) Core(TM) Processor family Frequency: 2.4 GHz Logical CPU Count: 8 ## Sequential Program Version Elapsed Time: 11.157s Clockticks: 36,426,054,639 Instructions Retired: 63,922,095,883 CPI Rate: 0.570 MUX Reliability: 0.974 Front-End Bound: 6.4% Bad Speculation: 0.4% Back-End Bound: 45.4% Memory Bound: 1.9% L1 Bound: 0.028 L3 Bound: Contested Accesses: 0.000 Data Sharing: 0.000 LLC Hit: 0.000 SQ Full: 0.000 DRAM Bound: Memory Latency: LLC Miss: 0.000 Store Bound: 0.000 Core Bound: 43.5% Divider: 0.000 Port Utilization: 0.693 Cycles of 0 Ports Utilized: 0.034 Cycles of 1 Port Utilized: 0.451 Cycles of 2 Ports Utilized: 0.237 Cycles of 3+ Ports Utilized: 0.224 Retiring: 47.8% Total Thread Count: 1 Paused Time: 0s ## Parallel Program Version Elapsed Time: 4.853s Clockticks: 36,484,054,726 Instructions Retired: 63,944,095,916 CPI Rate: 0.571 MUX Reliability: 0.926 Front-End Bound: 3.4% Bad Speculation: 0.4% Back-End Bound: 46.3% Memory Bound: 1.5% L1 Bound: 0.024 L3 
Bound: Contested Accesses: 0.000 Data Sharing: 0.000 LLC Hit: 0.000 SQ Full: 0.000 DRAM Bound: Memory Latency: LLC Miss: 0.009 Store Bound: 0.001 Core Bound: 44.7% Divider: 0.000 Port Utilization: 0.735 Cycles of 0 Ports Utilized: 0.025 Cycles of 1 Port Utilized: 0.475 Cycles of 2 Ports Utilized: 0.259 Cycles of 3+ Ports Utilized: 0.218 Retiring: 50.0% Total Thread Count: 4 Paused Time: 0s */
ten_tusscher_2004_epi_S3_15.c
//Original Ten Tusscher #include <assert.h> #include <stdlib.h> #include "ten_tusscher_2004_epi_S3_15.h" GET_CELL_MODEL_DATA(init_cell_model_data) { assert(cell_model); if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } //TODO: this should be called only once for the whole mesh, like in the GPU code SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.4536778755927,0.00132201466755546,0.776882180760055,0.776714012062442,0.000177819690535583,0.483897765693610,0.00296439161380545,0.999998309612983,1.97077339681427e-08,1.92017794452422e-05,0.999764538691899,1.00700003918174,0.999993820112011,4.74076184702537e-05,0.553071201822523,10.6955005973442,138.863214319207}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { assert(sv); real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = 
sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    //#ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Per-variant fitted conductances/rates; the assignments below
    // override the literature defaults declared above.
    real parameters []={14.5951660451258,0.000103567400537217,0.000134424511464274,0.000261002826954205,0.245313667680283,0.159725689747974,0.167456757509889,4.44865455807541,0.0152791374864867,1.17548122921737,1088.31101235283,0.000516923697255006,0.0952860789411678,0.0200000000000000,0.00400813836385454,4.59261947943359e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Working variables: membrane currents, concentration derivatives,
    // gate steady states (…_INF) and time constants (TAU_…).
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    // Precomputed exponential decay factors for the FCa and G gates
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (reversal potentials via Nernst equation)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations (analytic buffered-Ca update via quadratic roots)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;

    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // H and J gates use different rate formulas above/below -40 mV
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Transient-outward gate kinetics differ per cell type (EPI/ENDO/MCELL)
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // FCa and G gates may only move toward their steady state while the
    // membrane is depolarized above -37 mV; otherwise keep the old value.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler on the total ionic current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
PReLU.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/PReLU.c"
#else

/* Forward pass of PReLU: output = input > 0 ? input : w * input.
 * nOutputPlane == 0 selects the shared-parameter case (one weight for
 * every element); otherwise weight holds one slope per channel. */
void THNN_(PReLU_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THIndex_t nOutputPlane)
{
  THTensor_(resizeAs)(output, input);

  if (nOutputPlane == 0)
  {
    // handle shared parameter case
    real w = *THTensor_(data)(weight);
    TH_TENSOR_APPLY2(real, output, real, input,
      *output_data = (*input_data > 0) ? *input_data : w*(*input_data);
    );
  }
  else
  {
    input = THTensor_(newContiguous)(input);
    /* bs = batch count, ks = elements per channel (product of dims >= 2) */
    long bs = 1, ks = 1;
    {
      long input_ndim = THTensor_(nDimension)(input);
      /* channel dim is index 1 for batched (ndim > 1) input, else index 0 */
      if (input->size[input_ndim > 1] != nOutputPlane)
        THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]);

      if (input_ndim > 1) {
        bs = input->size[0];
        for (int d = 2; d < input_ndim; d++) {
          ks *= input->size[d];
        }
      }
    }

    real *output_data = THTensor_(data)(output);
    real *input_data = THTensor_(data)(input);
    real *weight_data = THTensor_(data)(weight);
    THIndex_t i, j, k;
    /* batches are independent; j,k are private per thread */
#pragma omp parallel for private(j,k)
    for (i = 0; i < bs; ++i)
    {
      real* n_input_data = input_data + i*nOutputPlane*ks;
      real* n_output_data = output_data + i*nOutputPlane*ks;
      for (j = 0; j < nOutputPlane; ++j)
      {
        for (k = 0; k < ks; ++k)
          n_output_data[k] = (n_input_data[k] > 0) ? n_input_data[k] : weight_data[j] * n_input_data[k];
        n_input_data += ks;
        n_output_data += ks;
      }
    }

    /* releases the reference taken by newContiguous above */
    THTensor_(free)(input);
  }
}

/* Backward pass w.r.t. input: gradInput = input > 0 ? gradOutput
 *                                                   : w * gradOutput. */
void THNN_(PReLU_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight,
          THIndex_t nOutputPlane)
{
  THNN_CHECK_NELEMENT(input, gradOutput);
  THTensor_(resizeAs)(gradInput, input);

  if (nOutputPlane == 0)
  {
    real w = THTensor_(data)(weight)[0];
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
      if ((*input_data) > 0)
        *gradInput_data = *gradOutput_data;
      else
        *gradInput_data = w * (*gradOutput_data);
    );
  }
  else
  {
    input = THTensor_(newContiguous)(input);
    gradOutput = THTensor_(newContiguous)(gradOutput);
    weight = THTensor_(newContiguous)(weight);

    const real *input_data = THTensor_(data)(input);
    const real *gradOutput_data = THTensor_(data)(gradOutput);
    const real *weight_data = THTensor_(data)(weight);
    real *gradInput_data = THTensor_(data)(gradInput);

    /* same bs/ks decomposition as in updateOutput */
    long bs = 1, ks = 1;
    {
      long input_ndim = THTensor_(nDimension)(input);
      if (input->size[input_ndim > 1] != nOutputPlane)
        THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]);

      if (input_ndim > 1) {
        bs = input->size[0];
        for (int d = 2; d < input_ndim; d++) {
          ks *= input->size[d];
        }
      }
    }

    THIndex_t i, j, k;
#pragma omp parallel for private(j,k)
    for (i = 0; i < bs; ++i)
    {
      const real *n_input_data = input_data + i*nOutputPlane*ks;
      const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;
      real *n_gradInput_data = gradInput_data + i*nOutputPlane*ks;

      for (j = 0; j < nOutputPlane; ++j)
      {
        real w = weight_data[j];
        for (k = 0; k < ks; ++k)
        {
          if (n_input_data[k] > 0)
            n_gradInput_data[k] = n_gradOutput_data[k];
          else
            n_gradInput_data[k] = n_gradOutput_data[k] * w;
        }
        n_input_data += ks;
        n_gradInput_data += ks;
        n_gradOutput_data += ks;
      }
    }

    THTensor_(free)(input);
    THTensor_(free)(gradOutput);
    THTensor_(free)(weight);
  }
}

/* Accumulates the gradient w.r.t. the slope parameter(s):
 *   gradWeight[j] += scale * sum over (input <= 0) of input * gradOutput.
 * gradInput, gradWeightBuf and gradWeightBuf2 are unused here (kept for
 * API compatibility with other backends -- TODO confirm against callers). */
void THNN_(PReLU_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight,
          THTensor *gradWeight,
          THTensor *gradWeightBuf,
          THTensor *gradWeightBuf2,
          THIndex_t nOutputPlane,
          accreal scale_)
{
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  THNN_CHECK_NELEMENT(input, gradOutput);

  if (nOutputPlane == 0)
  {
    real *gradWeight_data = THTensor_(data)(gradWeight);
    real sum = 0;
    TH_TENSOR_APPLY2(real, input, real, gradOutput,
      if ((*input_data) <= 0)
        sum += (*input_data) * (*gradOutput_data);
    );
    gradWeight_data[0] += scale * sum;
  }
  else
  {
    THArgCheck(THTensor_(isContiguous)(gradWeight), 6, "gradWeight needs to be contiguous");
    input = THTensor_(newContiguous)(input);
    gradOutput = THTensor_(newContiguous)(gradOutput);
    weight = THTensor_(newContiguous)(weight);
    long bs = 1, ks = 1;
    {
      long input_ndim = THTensor_(nDimension)(input);
      if (input->size[input_ndim > 1] != nOutputPlane)
        THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]);

      if (input_ndim > 1) {
        bs = input->size[0];
        for (int d = 2; d < input_ndim; d++) {
          ks *= input->size[d];
        }
      }
    }

    const real *input_data = THTensor_(data)(input);
    const real *gradOutput_data = THTensor_(data)(gradOutput);
    const real *weight_data = THTensor_(data)(weight);
    real *gradWeight_data = THTensor_(data)(gradWeight);

    /* serial over batches: gradWeight_data[j] is accumulated across i */
    THIndex_t i, j, k;
    for (i = 0; i < bs; ++i)
    {
      const real *n_input_data = input_data + i*nOutputPlane*ks;
      const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;

      for (j = 0; j < nOutputPlane; ++j)
      {
        real sum = 0;
        for (k = 0; k < ks; ++k)
          if (n_input_data[k] <= 0)
            sum += n_gradOutput_data[k] * n_input_data[k];
        gradWeight_data[j] += scale * sum;
        n_input_data += ks;
        n_gradOutput_data += ks;
      }
    }

    THTensor_(free)(input);
    THTensor_(free)(gradOutput);
    THTensor_(free)(weight);
  }
}

#endif
normalize_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: jxyang@openailab.com */ #include <math.h> #include "sys_port.h" #include "module.h" #include "tengine_errno.h" #include "tengine_log.h" #include "tengine_ir.h" #include "../../cpu_node_ops.h" #include "tengine_op.h" #include "normalize_param.h" static void norm_channel(float* input, float* output, float* buffer, float* scale, int hw, int channel, int num_thread) { memset(buffer, 0, hw * sizeof(float)); //#pragma omp parallel for num_threads(num_thread) for (int i = 0; i < channel; i++) { for (int j = 0; j < hw; j++) { float data = *(input + i * hw + j); buffer[j] += (data * data); } } //#pragma omp parallel for num_threads(num_thread) for (int j = 0; j < hw; j++) { buffer[j] = 1.f / sqrt(buffer[j]); } //#pragma omp parallel for num_threads(num_thread) for (int i = 0; i < channel; i++) { for (int j = 0; j < hw; j++) { float data = *(input + i * hw + j); *(output + i * hw + j) = data * buffer[j] * scale[i]; } } } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } 
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct ir_tensor* scale_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]); normalize_param_t* param = ( normalize_param_t* )(ir_node->op.param_mem); float* input_org = ( float* )input_tensor->data; float* output_org = ( float* )output_tensor->data; float* sclae_org = ( float* )scale_tensor->data; int batch_number = input_tensor->dims[0]; int channel_num = input_tensor->dims[1]; int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]); int img_size = channel_num * channel_size; float* buffer = ( float* )sys_malloc(channel_size * sizeof(float)); if (param->channel_shared == 0 && param->across_spatial == 0) { for (int i = 0; i < batch_number; i++) { norm_channel(input_org, output_org, buffer, sclae_org, channel_size, channel_num, exec_graph->num_thread); input_org += img_size; output_org += img_size; } } sys_free(buffer); return 0; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node) { return OPS_SCORE_BEST; } static struct node_ops normalize_node_ops = {.prerun = NULL, .run = run, .reshape = NULL, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; static int ret_normalize_node_ops(void* arg) { return register_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops); } static int unret_normalize_node_ops(void* arg) { return unregister_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops); } AUTO_REGISTER_OPS(ret_normalize_node_ops); AUTO_UNREGISTER_OPS(unret_normalize_node_ops);
GB_binop__isle_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int16) // A.*B function (eWiseMult): GB (_AemultB_08__isle_int16) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int16) // A.*B function (eWiseMult): GB (_AemultB_04__isle_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int16) // A*D function (colscale): GB (_AxD__isle_int16) // D*A function (rowscale): GB (_DxB__isle_int16) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int16) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int16) // C=scalar+B GB (_bind1st__isle_int16) // C=scalar+B' GB (_bind1st_tran__isle_int16) // C=A+scalar GB (_bind2nd__isle_int16) // C=A'+scalar GB (_bind2nd_tran__isle_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT16 || GxB_NO_ISLE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isle_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// NOTE(review): tail of an auto-generated SuiteSparse:GraphBLAS binary-operator
// file (operator: cij = (aij <= bij), int16).  Auto-generated files are not
// edited by hand; only comments were added here.  GB_DISABLE, GBB, GBX and the
// GB() name-mangling macro are defined by headers outside this chunk.

// tail of the preceding eWiseMult kernel (its head lies outside this chunk)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isle_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time (GxB_NO_* controls)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isle_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;       // the ISLE operator, scalar first
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isle_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;       // the ISLE operator, scalar second
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isle_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isle_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
antidep2-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// Race condition due to anti-dependence

#include <stdlib.h>

// DataRaceBench "yes" case: the data race below is INTENTIONAL — this file is
// a benchmark input for race detectors.  Do NOT "fix" the race.
int main(int argc, char* argv[])
{
  int i;
  int len = 1000;

  // optional command-line override of the array length
  if (argc>1) len = atoi(argv[1]);

  int a[len];

  // sequential initialization: a[i] = i
  for (i=0; i<len; i++)
    a[i]= i;

  // Anti-dependence race: iteration i reads a[i+1], which iteration i+1
  // writes concurrently; no ordering is enforced between the iterations.
#pragma omp parallel for
  for (i=0;i< len -1 ;i++)
    a[i]=a[i+1]+1;

  return 0;
}
GB_unaryop__abs_fp32_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp32_uint16
// op(A') function:  GB_tran__abs_fp32_uint16

// C type:   float
// A type:   uint16_t
// cast:     float cij = (float) aij
// unaryop:  cij = fabsf (aij)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabsf (x) ;

// casting
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_fp32_uint16
(
    float *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise: Cx [p] = fabsf ((float) Ax [p])
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_fp32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
depend-1.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* GCC dejagnu compile-only test for OpenMP `depend` clause array sections.
   Each `dg-error` directive must stay on the SAME physical line as the
   pragma it annotates — do not reflow these statements.  */

extern int a[][10], a2[][10];
int b[10], c[10][2], d[10], e[10], f[10];
int b2[10], c2[10][2], d2[10], e2[10], f2[10];
int k[10], l[10], m[10], n[10], o;
int *p;
void bar (void);
int t[10];
#pragma omp threadprivate (t)

void
foo (int g[3][10], int h[4][8], int i[2][10], int j[][9],
     int g2[3][10], int h2[4][8], int i2[2][10], int j2[][9])
{
  /* Invalid sections: each line below is expected to produce the diagnostic
     named in its dg-error directive.  */
  #pragma omp task depend(in: bar[2:5]) /* { dg-error "is not a variable" } */
  ;
  #pragma omp task depend(out: t[2:5])
  ;
  #pragma omp task depend(inout: k[0.5:]) /* { dg-error "low bound \[^\n\r]* of array section does not have integral type" } */
  ;
  #pragma omp task depend(in: l[:7.5f]) /* { dg-error "length \[^\n\r]* of array section does not have integral type" } */
  ;
  #pragma omp task depend(out: m[p:]) /* { dg-error "low bound \[^\n\r]* of array section does not have integral type" } */
  ;
  #pragma omp task depend(inout: n[:p]) /* { dg-error "length \[^\n\r]* of array section does not have integral type" } */
  ;
  #pragma omp task depend(in: o[2:5]) /* { dg-error "does not have pointer or array type" } */
  ;
  #pragma omp task depend(out: a[:][2:4]) /* { dg-error "array type length expression must be specified" } */
  ;
  #pragma omp task depend(inout: b[-1:]) /* { dg-error "negative low bound in array section" } */
  ;
  #pragma omp task depend(inout: c[:-3][1:1]) /* { dg-error "negative length in array section" } */
  ;
  #pragma omp task depend(in: d[11:]) /* { dg-error "low bound \[^\n\r]* above array section size" } */
  ;
  #pragma omp task depend(out: e[:11]) /* { dg-error "length \[^\n\r]* above array section size" } */
  ;
  #pragma omp task depend(out: f[1:10]) /* { dg-error "high bound \[^\n\r]* above array section size" } */
  ;
  #pragma omp task depend(in: g[:][2:4]) /* { dg-error "for array function parameter length expression must be specified" } */
  ;
  #pragma omp task depend(in: h[2:2][-1:]) /* { dg-error "negative low bound in array section" } */
  ;
  #pragma omp task depend(inout: h[:1][:-3]) /* { dg-error "negative length in array section" } */
  ;
  #pragma omp task depend(out: i[:1][11:]) /* { dg-error "low bound \[^\n\r]* above array section size" } */
  ;
  #pragma omp task depend(in: j[3:4][:10]) /* { dg-error "length \[^\n\r]* above array section size" } */
  ;
  #pragma omp task depend(out: j[30:10][5:5]) /* { dg-error "high bound \[^\n\r]* above array section size" } */
  ;
  /* Valid sections: no diagnostics expected below.  */
  #pragma omp task depend(out: a2[:3][2:4])
  ;
  #pragma omp task depend(inout: b2[0:])
  ;
  #pragma omp task depend(inout: c2[:3][1:1])
  ;
  #pragma omp task depend(in: d2[9:])
  ;
  #pragma omp task depend(out: e2[:10])
  ;
  #pragma omp task depend(out: f2[1:9])
  ;
  #pragma omp task depend(in: g2[:2][2:4])
  ;
  #pragma omp task depend(in: h2[2:2][0:])
  ;
  #pragma omp task depend(inout: h2[:1][:3])
  ;
  #pragma omp task depend(out: i2[:1][9:])
  ;
  #pragma omp task depend(in: j2[3:4][:9])
  ;
  #pragma omp task depend(out: j2[30:10][5:4])
  ;
}
data.c
#include "data.h"
#include "utils.h"
#include "image.h"
#include "dark_cuda.h"
#include "box.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// number of characters in the captcha alphabet (a-z, 0-9, plus "blank")
#define NUMCHARS 37

// guards the shared RNG / path selection in the path-picking helpers below
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

// Read a text file into a linked list, one heap-allocated string per line.
// Exits via file_error() if the file cannot be opened.
list *get_paths(char *filename)
{
    char *path;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    while((path=fgetl(file))){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}

/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for(i = 0; i < n; ++i){
        int index = random_gen()%m;
        indexes[i] = index;
        random_paths[i] = paths[index];
        if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}
*/

// Pick n paths out of m, advancing `mini_batch` independent "time lines" by a
// random stride (1..augment_speed) — used for sequential/video-style sampling.
// Returns a calloc'd array of borrowed pointers into `paths` (caller frees the
// array only).  NOTE(review): calloc results are not checked for NULL.
char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed)
{
    int speed = rand_int(1, augment_speed);
    if (speed < 1) speed = 1;
    char** sequentia_paths = (char**)calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    //printf("n = %d, mini_batch = %d \n", n, mini_batch);
    unsigned int *start_time_indexes = (unsigned int *)calloc(mini_batch, sizeof(unsigned int));
    for (i = 0; i < mini_batch; ++i) {
        start_time_indexes[i] = random_gen() % m;
        //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]);
    }
    for (i = 0; i < n; ++i) {
        do {
            int time_line_index = i % mini_batch;
            unsigned int index = start_time_indexes[time_line_index] % m;
            start_time_indexes[time_line_index] += speed;
            //int index = random_gen() % m;
            sequentia_paths[i] = paths[index];
            //if(i == 0) printf("%s\n", paths[index]);
            //printf(" index = %u - grp: %s \n", index, paths[index]);
            if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]);
        } while (strlen(sequentia_paths[i]) == 0);   // retry until a non-empty path is drawn
    }
    free(start_time_indexes);
    pthread_mutex_unlock(&mutex);
    return sequentia_paths;
}

// Pick n uniformly random paths out of m (borrowed pointers, see above).
char **get_random_paths(char **paths, int n, int m)
{
    char** random_paths = (char**)calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    //printf("n = %d \n", n);
    for(i = 0; i < n; ++i){
        do {
            int index = random_gen() % m;
            random_paths[i] = paths[index];
            //if(i == 0) printf("%s\n", paths[index]);
            //printf("grp: %s\n", paths[index]);
            if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]);
        } while (strlen(random_paths[i]) == 0);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}

// Copy each path with `find` replaced by `replace`; strings are duplicated,
// so both the returned array and its strings are owned by the caller.
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char** replace_paths = (char**)calloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        char replaced[4096];
        find_replace(paths[i], find, replace, replaced);
        replace_paths[i] = copy_string(replaced);
    }
    return replace_paths;
}

// Load n images resized to w*h, converted to grayscale; pixel buffers are
// moved into the returned matrix rows.
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image(paths[i], w, h, 3);

        image gray = grayscale_image(im);
        free_image(im);
        im = gray;

        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

// Load n color images resized to w*h into a matrix (one row per image).
matrix load_image_paths(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

// Load n images with random crop/flip/HSV augmentation, resized to w*h.
matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        int size = w > h ? w : h;
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_augment_image(im, angle, aspect, min, max, size);
        int flip = use_flip ? random_gen() % 2 : 0;
        if (flip)
            flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);

        image sized = resize_image(crop, w, h);

        //show_image(im, "orig");
        //show_image(sized, "sized");
        //show_image(sized, paths[i]);
        //wait_until_press_key_cv();
        //printf("w = %d, h = %d \n", sized.w, sized.h);

        free_image(im);
        free_image(crop);
        X.vals[i] = sized.data;
        X.cols = sized.h*sized.w*sized.c;
    }
    return X;
}

extern int check_mistakes;

// Parse a label file of "id x y w h" lines into a realloc'd box_label array;
// *n receives the count.  A missing file is tolerated (logged to bad.list,
// *n = 0).  NOTE(review): the fopen of "bad.list" is not checked for NULL.
box_label *read_boxes(char *filename, int *n)
{
    box_label* boxes = (box_label*)calloc(1, sizeof(box_label));
    FILE *file = fopen(filename, "r");
    if (!file) {
        printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename);
        //file_error(filename);
        FILE* fw = fopen("bad.list", "a");
        fwrite(filename, sizeof(char), strlen(filename), fw);
        char *new_line = "\n";
        fwrite(new_line, sizeof(char), strlen(new_line), fw);
        fclose(fw);
        if (check_mistakes) getchar();
        *n = 0;
        return boxes;
    }
    float x, y, h, w;
    int id;
    int count = 0;
    while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
        boxes = (box_label*)realloc(boxes, (count + 1) * sizeof(box_label));
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        // derived edges from center/size
        boxes[count].left   = x - w/2;
        boxes[count].right  = x + w/2;
        boxes[count].top    = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}

// Fisher-Yates-style shuffle of the box array (random pairing, in place).
void randomize_boxes(box_label *b, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        box_label swap = b[i];
        int index = random_gen()%n;
        b[i] = b[index];
        b[index] = swap;
    }
}

// Map box coordinates through the crop (dx,dy,sx,sy) and optional horizontal
// flip, clamping to [0,1].  Boxes at (0,0) or entirely outside the image are
// marked with the 999999 sentinel (consumed later by fill_truth_detection).
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 ||
            (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1)
        {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;

        if(flip){
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }

        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);

        // recompute center/size from the clamped edges
        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);

        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}

// Fill `truth` with up to 30 boxes in (x,y,w,h,one-hot-class) layout of
// stride (4+classes).  NOTE(review): `w < .0` compares against 0.0 — looks
// like it was meant to skip degenerate boxes; confirm against history.
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count && i < 30; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if (w < .0 || h < .0) continue;

        int index = (4+classes) * i;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;

        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}

// Fill a region-layer truth grid (num_boxes x num_boxes cells, stride
// 5+classes); first box to claim a cell wins.
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if (w < .001 || h < .001) continue;

        // grid cell that contains the box center; x,y become cell-relative
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);

        x = x*num_boxes - col;
        y = y*num_boxes - row;

        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;     // cell already taken
        truth[index++] = 1;

        if (id < classes) truth[index+id] = 1;
        index += classes;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}

// Fill detection truth (stride-5 records: x,y,w,h,id), validating each box and
// logging bad annotations to bad_label.list.  Returns the smallest box side in
// network pixels (0 if no valid box).  NOTE(review): the `system(buff)` calls
// interpolate labelpath into a shell command — unsafe if paths are untrusted.
int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int net_w, int net_h)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);

    int count = 0;
    int i;
    box_label *boxes = read_boxes(labelpath, &count);
    int min_w_h = 0;
    float lowest_w = 1.F / net_w;
    float lowest_h = 1.F / net_h;
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if (count > num_boxes) count = num_boxes;
    float x, y, w, h;
    int id;
    int sub = 0;    // number of skipped (invalid) boxes so far

    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        // not detect small objects
        //if ((w < 0.001F || h < 0.001F)) continue;
        // if truth (box for object) is smaller than 1x1 pix
        char buff[256];
        if (id >= classes) {
            printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d] \n", id, (classes-1));
            sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1));
            system(buff);
            getchar();
            ++sub;
            continue;
        }
        if ((w < lowest_w || h < lowest_h)) {
            //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath);
            //system(buff);
            ++sub;
            continue;
        }
        // 999999 is the "invalid box" sentinel set by correct_boxes()
        if (x == 999999 || y == 999999) {
            printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1 \n");
            sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (x <= 0 || x > 1 || y <= 0 || y > 1) {
            printf("\n Wrong annotation: x = %f, y = %f \n", x, y);
            sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (w > 1) {
            printf("\n Wrong annotation: w = %f \n", w);
            sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w);
            system(buff);
            w = 1;
            if (check_mistakes) getchar();
        }
        if (h > 1) {
            printf("\n Wrong annotation: h = %f \n", h);
            sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h);
            system(buff);
            h = 1;
            if (check_mistakes) getchar();
        }
        if (x == 0) x += lowest_w;
        if (y == 0) y += lowest_h;

        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;

        if (min_w_h == 0) min_w_h = w*net_w;
        if (min_w_h > w*net_w) min_w_h = w*net_w;
        if (min_w_h > h*net_h) min_w_h = h*net_h;
    }
    free(boxes);
    return min_w_h;
}

// Print the argmax character of each NUMCHARS-wide slice of `pred`.
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}

// One-hot captcha truth from the filename (chars after the last '/').
// NOTE(review): strrchr returns NULL if `path` has no '/', then ++begin is UB
// — presumably callers always pass paths containing '/'; confirm.
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    int i;
    for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;   // pad with the "blank" class
    }
}

// Load a captcha batch: images plus per-character one-hot labels.
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);
    return d;
}

// Autoencoder-style captcha batch: y aliases X (shallow share of the pixels).
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}

// One-hot truth by substring match of each label name against the path;
// warns when the path matches zero or multiple labels.
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    memset(truth, 0, k*sizeof(float));
    int count = 0;
    for(i = 0; i < k; ++i){
        if(strstr(path, labels[i])){
            truth[i] = 1;
            ++count;
        }
    }
    if (count != 1) {
        printf("Too many or too few labels: %d, %s\n", count, path);
        count = 0;
        for (i = 0; i < k; ++i) {
            if (strstr(path, labels[i])) {
                printf("\t label %d: %s \n", count, labels[i]);
                count++;
            }
        }
    }
}

// Propagate positive labels up the hierarchy tree, then mark every group with
// no positive member with SECRET_NUM (masked-out groups).
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}

// Build an n x k label matrix from paths (optionally hierarchy-expanded).
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    int i;
    for(i = 0; i < n && labels; ++i){
        fill_truth(paths[i], labels, k, y.vals[i]);
        if(hierarchy){
            fill_hierarchy(y.vals[i], k, hierarchy);
        }
    }
    return y;
}

// Multi-label truth read from per-image tag files ("imgs" -> "labels"/"labels2").
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    int count = 0;
    for(i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file){
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
            if(!file) continue;
        }
        ++count;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    printf("%d/%d\n", count, n);
    return y;
}

// Read label names (one per line); optionally report the count via *size.
char **get_labels_custom(char *filename, int *size)
{
    list *plist = get_paths(filename);
    if(size) *size = plist->size;
    char **labels = (char **)list_to_array(plist);
    free_list(plist);
    return labels;
}

// Convenience wrapper: labels without the count.
char **get_labels(char *filename)
{
    return get_labels_custom(filename, NULL);
}

// Free a data batch; shallow batches free only the row-pointer arrays.
void free_data(data d)
{
    if(!d.shallow){
        free_matrix(d.X);
        free_matrix(d.y);
    }else{
        free(d.X.vals);
        free(d.y.vals);
    }
}

// Load n augmented images plus region-layer truth (see fill_truth_region).
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;

        // random jittered crop, then resize to the network dimensions
        int dw = (ow*jitter);
        int dh = (oh*jitter);

        int pleft  = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop   = rand_uniform(-dh, dh);
        int pbot   = rand_uniform(-dh, dh);

        int swidth =  ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        float sx = (float)swidth  / ow;
        float sy = (float)sheight / oh;

        int flip = random_gen()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);

        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}

// Load n image pairs (2*classes truth columns: per-class IoU comparison).
// NOTE(review): fp1/fp2 fopen results are not NULL-checked before fscanf.
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;

    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2],   w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);

        d.X.vals[i] = (float*)calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i],         im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));

        int id;
        float iou;

        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2],   "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");

        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }

        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");

        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }

        // collapse per-class IoUs into win/lose/ambiguous (SECRET_NUM) pairs
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 &&  d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 &&  d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j]   = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);

        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}

// Load ONE random image at native size with jitter augmentation and swag-style
// truth (see fill_truth_swag).
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = random_gen()%n;
    char *random_path = paths[index];

    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;

    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;

    d.X.rows = 1;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = (4+classes)*30;
    d.y = make_matrix(1, k);

    int dw = w*jitter;
    int dh = h*jitter;

    int pleft  = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop   = rand_uniform(-dh, dh);
    int pbot   = rand_uniform(-dh, dh);

    int swidth =  w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth  / w;
    float sy = (float)sheight / h;

    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);

    free_image(orig);
    free_image(cropped);

    return d;
}

// Append old_truth boxes after the boxes already in new_truth (stride-5
// records; a zero x terminates each list) — used for mixup blending.
void blend_truth(float *new_truth, int boxes, float *old_truth)
{
    const int t_size = 4 + 1;
    int count_new_truth = 0;
    int t;
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*(4 + 1)];
        if (!x) break;
        count_new_truth++;
    }
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + t*t_size;
        float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
        float x = old_truth_ptr[0];
        if (!x) break;

        new_truth_ptr[0] = old_truth_ptr[0];
        new_truth_ptr[1] = old_truth_ptr[1];
        new_truth_ptr[2] = old_truth_ptr[2];
        new_truth_ptr[3] = old_truth_ptr[3];
        new_truth_ptr[4] = old_truth_ptr[4];
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}

#ifdef OPENCV

#include "http_stream.h"

// OpenCV path: full detection-batch loader with jitter/letterbox/flip/HSV/blur
// augmentation, optional mixup (blend two images + their truths), sequential
// ("track") sampling, and optional debug image dumps (show_imgs).
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter,
    float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;
    char **random_paths;
    char **mixup_random_paths = NULL;
    if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);

    int mixup = use_mixup ? random_gen() % 2 : 0;
    //printf("\n mixup = %d \n", mixup);
    if (mixup) {
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }

    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;

    // augmentation parameters, reused across a tracked mini-batch
    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0;
    int augmentation_calculated = 0;

    d.y = make_matrix(n, 5*boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;   // recalculate augmentation for the 2nd sequence if(track==1)

        for (i = 0; i < n; ++i) {
            float *truth = (float*)calloc(5 * boxes, sizeof(float));
            const char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];

            int flag = (c >= 3);
            mat_cv *src;
            src = load_image_mat_cv(filename, flag);
            if (src == NULL) {
                if (check_mistakes) getchar();
                continue;
            }

            int oh = get_height_mat(src);
            int ow = get_width_mat(src);

            int dw = (ow*jitter);
            int dh = (oh*jitter);

            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();

                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);

                flip = use_flip ? random_gen() % 2 : 0;

                //blur = rand_int(0, 1) ? (use_blur) : 0;
                int tmp_blur = rand_int(0, 2);  // 0 - disable, 1 - blur background, 2 - blur the whole image
                if (tmp_blur == 2) blur = use_blur;
                else blur = tmp_blur;
            }

            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh);

            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently

            if (letter_box)
            {
                // grow the crop so the aspect ratio matches the network's
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh)/2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow)/2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }

            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);

            if (min_w_h / 8 < blur && blur > 1) blur = min_w_h / 8;   // disable blur if one of the objects is too small

            image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp,
                blur, boxes, d.y.vals[i]);

            if (i_mixup) {
                // second pass: 50/50 blend with the image loaded in pass one
                image old_img = ai;
                old_img.data = d.X.vals[i];
                //show_image(ai, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images_cv(ai, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);
            }

            d.X.vals[i] = ai.data;

            memcpy(d.y.vals[i], truth, 5*boxes * sizeof(float));

            if (show_imgs)// && i_mixup)  // delete i_mixup
            {
                image tmp_ai = copy_image(ai);
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*ai.w;
                    int right = (b.x + b.w / 2.)*ai.w;
                    int top = (b.y - b.h / 2.)*ai.h;
                    int bot = (b.y + b.h / 2.)*ai.h;
                    draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(tmp_ai, buff);
                if (show_imgs == 1) {
                    //char buff_src[1000];
                    //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                    //show_image_mat(src, buff_src);
                    show_image(tmp_ai, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
                free_image(tmp_ai);
            }

            release_mat(&src);
            free(truth);
        }
    }
    free(random_paths);
    if(mixup_random_paths) free(mixup_random_paths);

    return d;
}
#else    // OPENCV

// Non-OpenCV fallback: per-pixel weighted blend of two equally sized images.
void blend_images(image new_img, float alpha, image old_img, float beta)
{
    int i;
    int data_size = new_img.w * new_img.h * new_img.c;
    #pragma omp parallel for
    for (i = 0; i < data_size; ++i)
        new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta;
}

// Non-OpenCV detection-batch loader (this chunk of the file ends inside this
// function; its remainder lies outside the visible range).
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter,
    float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;
    char **random_paths;
    char **mixup_random_paths = NULL;
    if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);

    int mixup = use_mixup ? random_gen() % 2 : 0;
    //printf("\n mixup = %d \n", mixup);
    if (mixup) {
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }

    int i;
    data d = { 0 };
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;

    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0;
    int augmentation_calculated = 0;

    d.y = make_matrix(n, 5 * boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;
        for (i = 0; i < n; ++i) {
            float *truth = (float*)calloc(5 * boxes, sizeof(float));
            char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];

            image orig = load_image(filename, 0, 0, c);

            int oh = orig.h;
            int ow = orig.w;

            int dw = (ow*jitter);
            int dh = (oh*jitter);

            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();

                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);

                flip = use_flip ? random_gen() % 2 : 0;
            }

            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);

            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently

            if (letter_box)
            {
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh) / 2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow) / 2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }

            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            image sized = resize_image(cropped, w, h);
            if (flip) flip_image(sized);
            distort_image(sized, dhue, dsat, dexp);
            //random_distort_image(sized, hue, saturation,
exposure); fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h); if (i_mixup) { image old_img = sized; old_img.data = d.X.vals[i]; //show_image(sized, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images(sized, 0.5, old_img, 0.5); blend_truth(truth, boxes, d.y.vals[i]); free_image(old_img); } d.X.vals[i] = sized.data; memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float)); if (show_imgs)// && i_mixup) { char buff[1000]; sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1); if (!b.x) break; int left = (b.x - b.w / 2.)*sized.w; int right = (b.x + b.w / 2.)*sized.w; int top = (b.y - b.h / 2.)*sized.h; int bot = (b.y + b.h / 2.)*sized.h; draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(sized, buff); if (show_imgs == 1) { show_image(sized, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Press Enter: \n"); //getchar(); } free_image(orig); free_image(cropped); free(truth); } } free(random_paths); if (mixup_random_paths) free(mixup_random_paths); return d; } #endif // OPENCV void *load_thread(void *ptr) { //srand(time(0)); //printf("Loading data: %d\n", random_gen()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.blur, a.mixup, a.jitter, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = resize_image(*(a.im), a.w, a.h); }else if (a.type == LETTERBOX_DATA) { *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } free(ptr); 
return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args* ptr = (load_args*)calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } void *load_threads(void *ptr) { //srand(time(0)); int i; load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; data *out = args.d; int total = args.n; free(ptr); data* buffers = (data*)calloc(args.threads, sizeof(data)); pthread_t* threads = (pthread_t*)calloc(args.threads, sizeof(pthread_t)); for(i = 0; i < args.threads; ++i){ args.d = buffers + i; args.n = (i+1) * total/args.threads - i * total/args.threads; threads[i] = load_data_in_thread(args); } for(i = 0; i < args.threads; ++i){ pthread_join(threads[i], 0); } *out = concat_datas(buffers, args.threads); out->shallow = 0; for(i = 0; i < args.threads; ++i){ buffers[i].shallow = 1; free_data(buffers[i]); } free(buffers); free(threads); return 0; } pthread_t load_data(load_args args) { pthread_t thread; struct load_args* ptr = (load_args*)calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed"); return thread; } data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h) { if(m) paths = get_random_paths(paths, n, m); char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png"); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_image_paths_gray(replace_paths, n, out_w, out_h); if(m) free(paths); int i; for(i = 0; i < n; ++i) free(replace_paths[i]); free(replace_paths); return d; } data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, 
int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = (float**)calloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = (float**)calloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = random_gen()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k, hierarchy); if(m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = w; d.h = h; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure); d.y = load_tags_paths(paths, n, k); 
if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = (float**)calloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data newdata = concat_data(d[i], out); free_data(out); out = newdata; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } //translate_data_rows(d, -128); scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = random_gen()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, 
d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. / d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i+b*10000][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); //translate_data_rows(d, -128); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = random_gen()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; 
++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = (float**)calloc(num, sizeof(float*)); r.y.vals = (float**)calloc(num, sizeof(float*)); int i; for(i = 0; i < num; ++i){ int index = random_gen()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data* split = (data*)calloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train; data test; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = (float**)calloc(train.X.rows, sizeof(float*)); test.X.vals = (float**)calloc(test.X.rows, sizeof(float*)); train.y.vals = (float**)calloc(train.y.rows, sizeof(float*)); test.y.vals = (float**)calloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; 
} split[0] = train; split[1] = test; return split; }
// variable_utils.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // Ruben Zorrilla // Vicente Mataix Ferrandiz // // #if !defined(KRATOS_VARIABLE_UTILS ) #define KRATOS_VARIABLE_UTILS /* System includes */ /* External includes */ /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "includes/checks.h" #include "utilities/parallel_utilities.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class VariableUtils * @ingroup KratosCore * @brief This class implements a set of auxiliar, already parallelized, methods to * perform some common tasks related with the variable values and fixity. * @details The methods are exported to python in order to add this improvements to the python interface * @author Riccardo Rossi * @author Ruben Zorrilla * @author Vicente Mataix Ferrandiz */ class KRATOS_API(KRATOS_CORE) VariableUtils { public: ///@name Type Definitions ///@{ /// The node type typedef ModelPart::NodeType NodeType; /// The condition type typedef ModelPart::ConditionType ConditionType; /// The element type typedef ModelPart::ElementType ElementType; /// We create the Pointer related to VariableUtils KRATOS_CLASS_POINTER_DEFINITION(VariableUtils); /// The nodes container typedef ModelPart::NodesContainerType NodesContainerType; /// The conditions container typedef ModelPart::ConditionsContainerType ConditionsContainerType; /// The elements container typedef ModelPart::ElementsContainerType ElementsContainerType; /// A definition of the double variable typedef Variable< double > DoubleVarType; /// A definition of the array variable typedef Variable< array_1d<double, 3 > > ArrayVarType; ///@} ///@name Life Cycle ///@{ /** Constructor. 
*/ /** Destructor. */ ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Copies the nodal value of a variable from an origin model * part nodes to the nodes in a destination model part. It is assumed that * both origin and destination model parts have the same number of nodes. * @param rVariable reference to the variable to get the value from * @param rDestinationVariable reference to the variable to be set * @param rOriginModelPart origin model part from where the values are retrieved * @param rDestinationModelPart destination model part to where the values are copied to * @param BuffStep buffer step */ template< class TVarType > void CopyModelPartNodalVar( const TVarType& rVariable, const TVarType& rDestinationVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const unsigned int BuffStep = 0) { const int n_orig_nodes = rOriginModelPart.NumberOfNodes(); const int n_dest_nodes = rDestinationModelPart.NumberOfNodes(); KRATOS_ERROR_IF_NOT(n_orig_nodes == n_dest_nodes) << "Origin and destination model parts have different number of nodes." << "\n\t- Number of origin nodes: " << n_orig_nodes << "\n\t- Number of destination nodes: " << n_dest_nodes << std::endl; #pragma omp parallel for for(int i_node = 0; i_node < n_orig_nodes; ++i_node){ auto it_dest_node = rDestinationModelPart.NodesBegin() + i_node; const auto &it_orig_node = rOriginModelPart.NodesBegin() + i_node; const auto &r_value = it_orig_node->GetSolutionStepValue(rVariable, BuffStep); it_dest_node->GetSolutionStepValue(rDestinationVariable, BuffStep) = r_value; } } /** * @brief Copies the nodal value of a variable from an origin model * part nodes to the nodes in a destination model part. It is assumed that * both origin and destination model parts have the same number of nodes. 
* @param rVariable reference to the variable to get the value from and to save in * @param rOriginModelPart origin model part from where the values are retrieved * @param rDestinationModelPart destination model part to where the values are copied to * @param BuffStep buffer step */ template< class TVarType > void CopyModelPartNodalVar( const TVarType& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const unsigned int BuffStep = 0) { this->CopyModelPartNodalVar(rVariable, rVariable, rOriginModelPart, rDestinationModelPart, BuffStep); } template< class TVarType > void CopyModelPartNodalVarToNonHistoricalVar( const TVarType &rVariable, const TVarType &rDestinationVariable, const ModelPart &rOriginModelPart, ModelPart &rDestinationModelPart, const unsigned int BuffStep = 0) { const int n_orig_nodes = rOriginModelPart.NumberOfNodes(); const int n_dest_nodes = rDestinationModelPart.NumberOfNodes(); KRATOS_ERROR_IF_NOT(n_orig_nodes == n_dest_nodes) << "Origin and destination model parts have different number of nodes." 
<< "\n\t- Number of origin nodes: " << n_orig_nodes << "\n\t- Number of destination nodes: " << n_dest_nodes << std::endl; #pragma omp parallel for for(int i_node = 0; i_node < n_orig_nodes; ++i_node){ auto it_dest_node = rDestinationModelPart.NodesBegin() + i_node; const auto &it_orig_node = rOriginModelPart.NodesBegin() + i_node; const auto &r_value = it_orig_node->GetSolutionStepValue(rVariable, BuffStep); it_dest_node->GetValue(rDestinationVariable) = r_value; } } template< class TVarType > void CopyModelPartNodalVarToNonHistoricalVar( const TVarType &rVariable, const ModelPart &rOriginModelPart, ModelPart &rDestinationModelPart, const unsigned int BuffStep = 0) { this->CopyModelPartNodalVarToNonHistoricalVar(rVariable, rVariable, rOriginModelPart, rDestinationModelPart, BuffStep); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0, const unsigned int WriteBufferStep = 0) { KRATOS_TRY KRATOS_ERROR_IF( rOriginModelPart.FullName() == rDestinationModelPart.FullName() && rOriginVariable == rDestinationVariable && ReadBufferStep == WriteBufferStep) << "Trying to copy flagged nodal solution step values with the same origin and destination model parts/variables/buffer steps. 
This is not permitted ( Origin model part: " << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name() << ", variable: " << rOriginVariable.Name() << ", buffer step: " << ReadBufferStep << " ) !"; KRATOS_ERROR_IF_NOT(rOriginModelPart.HasNodalSolutionStepVariable(rOriginVariable)) << rOriginVariable.Name() << " is not found in nodal solution step variables list in origin model part ( " << rOriginModelPart.Name() << " )."; KRATOS_ERROR_IF_NOT(rDestinationModelPart.HasNodalSolutionStepVariable(rDestinationVariable)) << rDestinationVariable.Name() << " is not found in nodal solution step variables list in destination model part ( " << rDestinationModelPart.Name() << " )."; KRATOS_ERROR_IF(ReadBufferStep >= rOriginModelPart.GetBufferSize()) << "Origin model part ( " << rOriginModelPart.Name() << " ) buffer size is smaller or equal than read buffer size [ " << rOriginModelPart.GetBufferSize() << " <= " << ReadBufferStep << " ]."; KRATOS_ERROR_IF(WriteBufferStep >= rDestinationModelPart.GetBufferSize()) << "Destination model part ( " << rDestinationModelPart.Name() << " ) buffer size is smaller or equal than read buffer size [ " << rDestinationModelPart.GetBufferSize() << " <= " << WriteBufferStep << " ]."; CopyModelPartFlaggedVariable<NodesContainerType>( rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, [&](NodeType& rDestNode, const TDataType& rValue) { rDestNode.FastGetSolutionStepValue( rDestinationVariable, WriteBufferStep) = rValue; }, [&](const NodeType& rOriginNode) -> const TDataType& { return rOriginNode.FastGetSolutionStepValue(rOriginVariable, ReadBufferStep); }); rDestinationModelPart.GetCommunicator().SynchronizeVariable(rDestinationVariable); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true, 
const unsigned int ReadBufferStep = 0, const unsigned int WriteBufferStep = 0) { KRATOS_TRY CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar( rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue, ReadBufferStep, WriteBufferStep); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar( const Variable<TDataType>& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0, const unsigned int WriteBufferStep = 0) { KRATOS_TRY CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar( rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, ReadBufferStep, WriteBufferStep); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0) { KRATOS_TRY KRATOS_ERROR_IF_NOT(rOriginModelPart.HasNodalSolutionStepVariable(rOriginVariable)) << rOriginVariable.Name() << " is not found in nodal solution step variables list in origin model part ( " << rOriginModelPart.Name() << " )."; KRATOS_ERROR_IF(ReadBufferStep >= rOriginModelPart.GetBufferSize()) << "Origin model part ( " << rOriginModelPart.Name() << " ) buffer size is smaller or equal than read buffer size [ " << rOriginModelPart.GetBufferSize() << " <= " << ReadBufferStep << " ]."; CopyModelPartFlaggedVariable<NodesContainerType>( rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, [&](NodeType& rDestNode, const TDataType& rValue) { rDestNode.SetValue(rDestinationVariable, rValue); }, [&](const NodeType& rOriginNode) -> const TDataType& { return rOriginNode.FastGetSolutionStepValue(rOriginVariable, ReadBufferStep); 
}); rDestinationModelPart.GetCommunicator().SynchronizeNonHistoricalVariable(rDestinationVariable); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0) { CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue, ReadBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0) { CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, ReadBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0) { CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( rVariable, rVariable, rModelPart, rModelPart, rFlag, CheckValue, ReadBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int WriteBufferStep = 0) { KRATOS_TRY KRATOS_ERROR_IF_NOT(rDestinationModelPart.HasNodalSolutionStepVariable(rDestinationVariable)) << rDestinationVariable.Name() << " is not found in nodal solution step variables list in destination model part ( " << 
rDestinationModelPart.Name() << " )."; KRATOS_ERROR_IF(WriteBufferStep >= rDestinationModelPart.GetBufferSize()) << "Destination model part ( " << rDestinationModelPart.Name() << " ) buffer size is smaller or equal than read buffer size [ " << rDestinationModelPart.GetBufferSize() << " <= " << WriteBufferStep << " ]."; CopyModelPartFlaggedVariable<NodesContainerType>( rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, [&](NodeType& rDestNode, const TDataType& rValue) { rDestNode.FastGetSolutionStepValue( rDestinationVariable, WriteBufferStep) = rValue; }, [&](const NodeType& rOriginNode) -> const TDataType& { return rOriginNode.GetValue(rOriginVariable); }); rDestinationModelPart.GetCommunicator().SynchronizeVariable(rDestinationVariable); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int WriteBufferStep = 0) { CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue, WriteBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( const Variable<TDataType>& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int WriteBufferStep = 0) { CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, WriteBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( const Variable<TDataType>& rVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int WriteBufferStep = 0) { CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( rVariable, 
rVariable, rModelPart, rModelPart, rFlag, CheckValue, WriteBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true) { KRATOS_TRY KRATOS_ERROR_IF( rOriginModelPart.FullName() == rDestinationModelPart.FullName() && rOriginVariable == rDestinationVariable ) << "Trying to copy flagged nodal non-historical values with the same model parts/variables. This is not permitted ( Origin model part: " << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name() << ", variable: " << rOriginVariable.Name() << " ) !"; CopyModelPartFlaggedVariable<NodesContainerType>( rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, [&](NodeType& rDestNode, const TDataType& rValue) { rDestNode.SetValue(rDestinationVariable, rValue); }, [&](const NodeType& rOriginNode) -> const TDataType& { return rOriginNode.GetValue(rOriginVariable); }); rDestinationModelPart.GetCommunicator().SynchronizeNonHistoricalVariable(rDestinationVariable); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true) { CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar( rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true) { CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar( rVariable, rVariable, rOriginModelPart, 
        rDestinationModelPart, rFlag, CheckValue);
}

/**
 * @brief Copies a flagged elemental non-historical (data value container)
 * variable between (possibly different) model parts.
 */
template <class TDataType>
void CopyModelPartFlaggedElementVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    KRATOS_TRY

    // Self-copy (same model part and same variable) would be a no-op; forbid it.
    KRATOS_ERROR_IF(rOriginModelPart.FullName() == rDestinationModelPart.FullName() &&
                    rOriginVariable == rDestinationVariable)
        << "Trying to copy flagged elemental variable data with the same model "
           "parts/variables. This is not permitted ( Origin model part: "
        << rOriginModelPart.Name()
        << ", destination model part: " << rDestinationModelPart.Name()
        << ", variable: " << rOriginVariable.Name() << " ) !";

    CopyModelPartFlaggedVariable<ElementsContainerType>(
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
        [&](ElementType& rDestElement, const TDataType& rValue) {
            rDestElement.SetValue(rDestinationVariable, rValue);
        },
        [&](const ElementType& rOriginElement) -> const TDataType& {
            return rOriginElement.GetValue(rOriginVariable);
        });

    KRATOS_CATCH("");
}

/**
 * @brief Convenience overload: origin and destination are the same model part.
 */
template <class TDataType>
void CopyModelPartFlaggedElementVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    ModelPart& rModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    CopyModelPartFlaggedElementVar(
        rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag,
        CheckValue);
}

/**
 * @brief Convenience overload: same variable, different origin/destination
 * model parts.
 */
template <class TDataType>
void CopyModelPartFlaggedElementVar(
    const Variable<TDataType>& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    CopyModelPartFlaggedElementVar(
        rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag,
        CheckValue);
}

/**
 * @brief Copies a flagged condition non-historical (data value container)
 * variable between (possibly different) model parts.
 */
template <class TDataType>
void CopyModelPartFlaggedConditionVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart&
    rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    KRATOS_TRY

    // Self-copy (same model part and same variable) would be a no-op; forbid it.
    KRATOS_ERROR_IF(rOriginModelPart.FullName() == rDestinationModelPart.FullName() &&
                    rOriginVariable == rDestinationVariable)
        << "Trying to copy flagged condition variable data with the same model "
           "parts/variables. This is not permitted ( Origin model part: "
        << rOriginModelPart.Name()
        << ", destination model part: " << rDestinationModelPart.Name()
        << ", variable: " << rOriginVariable.Name() << " ) !";

    CopyModelPartFlaggedVariable<ConditionsContainerType>(
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
        [&](ConditionType& rDestCondition, const TDataType& rValue) {
            rDestCondition.SetValue(rDestinationVariable, rValue);
        },
        [&](const ConditionType& rOriginCondition) -> const TDataType& {
            return rOriginCondition.GetValue(rOriginVariable);
        });

    KRATOS_CATCH("");
}

/**
 * @brief Convenience overload: origin and destination are the same model part.
 */
template <class TDataType>
void CopyModelPartFlaggedConditionVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    ModelPart& rModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    CopyModelPartFlaggedConditionVar(
        rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag,
        CheckValue);
}

/**
 * @brief Convenience overload: same variable, different origin/destination
 * model parts.
 */
template <class TDataType>
void CopyModelPartFlaggedConditionVar(
    const Variable<TDataType>& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    CopyModelPartFlaggedConditionVar(
        rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag,
        CheckValue);
}

/**
 * @brief Copies the elemental value of a variable from an origin model
 * part elements to the elements in a destination model part. It is assumed that
 * both origin and destination model parts have the same number of elements.
 * @param rVariable reference to the variable to be set
 * @param rOriginModelPart origin model part from where the values are retrieved
 * @param rDestinationModelPart destination model part to where the values are copied to
 */
template< class TVarType >
void CopyModelPartElementalVar(
    const TVarType& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart)
{
    const int n_orig_elems = rOriginModelPart.NumberOfElements();
    const int n_dest_elems = rDestinationModelPart.NumberOfElements();

    // Element-wise copy requires a one-to-one correspondence: element i of the
    // origin maps to element i of the destination.
    KRATOS_ERROR_IF_NOT(n_orig_elems == n_dest_elems)
        << "Origin and destination model parts have different number of elements."
        << "\n\t- Number of origin elements: " << n_orig_elems
        << "\n\t- Number of destination elements: " << n_dest_elems << std::endl;

    #pragma omp parallel for
    for(int i_elems = 0; i_elems < n_orig_elems; ++i_elems){
        auto it_dest_elems = rDestinationModelPart.ElementsBegin() + i_elems;
        const auto &it_orig_elems = rOriginModelPart.ElementsBegin() + i_elems;
        const auto &r_value = it_orig_elems->GetValue(rVariable);
        it_dest_elems->SetValue(rVariable,r_value);
    }
}

/**
 * @brief Sets the nodal value of a scalar variable
 * @param rVariable reference to the scalar variable to be set
 * @param Value Value to be set
 * @param rNodes reference to the objective node set
 */
template <class TVarType>
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetScalarVar(
    const TVarType &rVariable,
    const double Value,
    NodesContainerType &rNodes)
{
    KRATOS_TRY

    #pragma omp parallel for
    for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
        NodesContainerType::iterator it_node = rNodes.begin() + k;
        it_node->FastGetSolutionStepValue(rVariable) = Value;
    }

    KRATOS_CATCH("")
}

/**
 * @brief Sets the nodal value of a scalar variable (considering flag)
 * @param rVariable reference to the scalar variable to be set
 * @param Value Value to be set
 * @param rNodes reference to the objective node set
 * @param Flag The flag to be
 * considered in the assignation
 * @param Check What is checked from the flag
 */
template< class TVarType >
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetScalarVarForFlag(
    const TVarType& rVariable,
    const double Value,
    NodesContainerType& rNodes,
    const Flags Flag,
    const bool Check = true
    )
{
    KRATOS_TRY

    #pragma omp parallel for
    for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
        NodesContainerType::iterator it_node = rNodes.begin() + k;
        // Only nodes whose flag state matches Check are updated.
        if (it_node->Is(Flag) == Check) it_node->FastGetSolutionStepValue(rVariable) = Value;
    }

    KRATOS_CATCH("")
}

/**
 * @brief Sets the nodal value of a vector variable
 * @param rVariable reference to the vector variable to be set
 * @param Value array containing the Value to be set
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetVectorVar(
    const ArrayVarType& rVariable,
    const array_1d<double, 3 >& Value,
    NodesContainerType& rNodes
    );

/**
 * @brief Sets the nodal value of a vector variable (considering flag)
 * @param rVariable reference to the vector variable to be set
 * @param Value array containing the Value to be set
 * @param rNodes reference to the objective node set
 * @param Flag The flag to be considered in the assignation
 * @param Check What is checked from the flag
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetVectorVarForFlag(
    const ArrayVarType& rVariable,
    const array_1d<double, 3 >& Value,
    NodesContainerType& rNodes,
    const Flags Flag,
    const bool Check = true
    );

/**
 * @brief Sets the nodal value of a scalar variable
 * @tparam TDataType Variable data type
 * @tparam Variable<TDataType> Variable type
 * @param rVariable reference to the scalar variable to be set
 * @param Value Value to be set
 * @param rNodes reference to the objective node set
 */
template<class TDataType, class TVarType = Variable<TDataType> >
void SetVariable(
    const TVarType& rVariable,
    const TDataType& rValue,
    NodesContainerType& rNodes
    )
{
    KRATOS_TRY

    #pragma omp parallel for
    for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
        NodesContainerType::iterator it_node = rNodes.begin() + k;
        it_node->FastGetSolutionStepValue(rVariable) = rValue;
    }

    KRATOS_CATCH("")
}

/**
 * @brief Sets the nodal value of a scalar variable (considering flag)
 * @tparam TDataType Variable data type
 * @tparam Variable<TDataType> Variable type
 * @param rVariable reference to the scalar variable to be set
 * @param rValue Value to be set
 * @param rNodes reference to the objective node set
 * @param Flag The flag to be considered in the assignation
 * @param Check What is checked from the flag
 */
template <class TDataType, class TVarType = Variable<TDataType>>
void SetVariable(
    const TVarType &rVariable,
    const TDataType &rValue,
    NodesContainerType &rNodes,
    const Flags Flag,
    const bool CheckValue = true)
{
    KRATOS_TRY

    #pragma omp parallel for
    for (int k = 0; k < static_cast<int>(rNodes.size()); ++k) {
        auto it_node = rNodes.begin() + k;
        // Only nodes whose flag state matches CheckValue are updated.
        if (it_node->Is(Flag) == CheckValue) {
            it_node->FastGetSolutionStepValue(rVariable) = rValue;
        }
    }

    KRATOS_CATCH("")
}

/**
 * @brief Sets the non-historical value of any variable to zero
 * @param rVariable reference to the variable to be set
 * @param rContainer reference to the objective container
 */
template< class TType , class TContainerType>
void SetNonHistoricalVariableToZero(
    const Variable< TType >& rVariable,
    TContainerType& rContainer)
{
    KRATOS_TRY
    // Delegates to SetNonHistoricalVariable with the variable's zero value.
    this->SetNonHistoricalVariable(rVariable, rVariable.Zero(), rContainer);
    KRATOS_CATCH("")
}

/**
 * @brief Sets the historical nodal value of any variable to zero
 * @param rVariable reference to the variable to be set
 * @param rNodes reference to the objective node set
 */
template< class TType >
void SetHistoricalVariableToZero(
    const Variable< TType >& rVariable,
    NodesContainerType& rNodes)
{
    KRATOS_TRY
    // Delegates to SetVariable with the variable's zero value.
    this->SetVariable(rVariable, rVariable.Zero(), rNodes);
    KRATOS_CATCH("")
}

/**
 * @brief Sets the nodal value of a scalar variable
 * non historical
 * @param rVariable reference to the scalar variable to be set
 * @param Value Value to be set
 * @param rNodes reference to the objective node set
 */
template< class TVarType >
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetNonHistoricalVariable")
void SetNonHistoricalScalarVar(
    const TVarType& rVariable,
    const double Value,
    NodesContainerType& rNodes
    )
{
    KRATOS_TRY

    #pragma omp parallel for
    for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
        NodesContainerType::iterator it_node = rNodes.begin() + k;
        it_node->SetValue(rVariable, Value);
    }

    KRATOS_CATCH("")
}

/**
 * @brief Sets the nodal value of a vector non historical variable
 * @param rVariable reference to the vector variable to be set
 * @param Value array containing the Value to be set
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetNonHistoricalVariable")
void SetNonHistoricalVectorVar(
    const ArrayVarType& rVariable,
    const array_1d<double, 3 >& Value,
    NodesContainerType& rNodes
    );

/**
 * @brief Sets the container value of any type of non historical variable
 * @param rVariable reference to the variable to be set
 * @param Value Value to be set
 * @param rContainer Reference to the objective container
 */
template< class TType, class TContainerType, class TVarType = Variable< TType >>
void SetNonHistoricalVariable(
    const TVarType& rVariable,
    const TType& Value,
    TContainerType& rContainer
    )
{
    KRATOS_TRY

    #pragma omp parallel for
    for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
        auto it_cont = rContainer.begin() + k;
        it_cont->SetValue(rVariable, Value);
    }

    KRATOS_CATCH("")
}

/**
 * @brief Sets the container value of any type of non historical variable (considering flag)
 * @param rVariable reference to the variable to be set
 * @param Value Value to be set
 * @param rContainer Reference to the objective container
 * @param Flag The flag to be considered in the assignation
 * @param Check What is
 * checked from the flag
 */
template< class TType, class TContainerType, class TVarType = Variable< TType >>
void SetNonHistoricalVariable(
    const TVarType& rVariable,
    const TType& rValue,
    TContainerType& rContainer,
    const Flags Flag,
    const bool Check = true
    )
{
    KRATOS_TRY

    #pragma omp parallel for
    for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
        auto it_cont = rContainer.begin() + k;
        // Only entities whose flag state matches Check are updated.
        if (it_cont->Is(Flag) == Check) {
            it_cont->SetValue(rVariable, rValue);
        }
    }

    KRATOS_CATCH("")
}

/**
 * @brief Clears the container data value container
 * @param rContainer Reference to the objective container
 */
template< class TContainerType>
void ClearNonHistoricalData(TContainerType& rContainer)
{
    KRATOS_TRY

    const auto it_cont_begin = rContainer.begin();

    #pragma omp parallel for
    for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
        auto it_cont = it_cont_begin + k;
        // Wipes the whole non-historical data container of the entity.
        it_cont->Data().Clear();
    }

    KRATOS_CATCH("")
}

/**
 * @brief Distributes variable values in TContainerType container to nodes
 *
 * This method distributes variables values stored in TContainerType data value container in rModelPart
 * to nodes. Constant weighting is used for each node based on rWeightVariable value. The result
 * is stored in nodal non-historical data value container under the same rVariable. If IsInverseWeightProvided
 * is true, then the weights provided by rWeightVariable is inverted to get nodal weight. Otherwise, the value
 * given by rWeightVariable is used as weight.
 *
 *
 * @tparam TDataType Data type
 * @tparam TContainerType ContainerType of model part
 * @tparam TWeightDataType Data type of weight variable (this should be either int or double)
 * @param rModelPart Model part
 * @param rVariable Variable to be distributed
 * @param rWeightVariable Variable which holds weight to distribute entity values to nodes
 * @param IsInverseWeightProvided Whether the weight is provided as inverse or not.
*/ template <class TDataType, class TContainerType, class TWeightDataType> void WeightedAccumulateVariableOnNodes( ModelPart& rModelPart, const Variable<TDataType>& rVariable, const Variable<TWeightDataType>& rWeightVariable, const bool IsInverseWeightProvided = false); /** * @brief Sets a flag according to a given status over a given container * @param rFlag flag to be set * @param rFlagValue flag value to be set * @param rContainer Reference to the objective container */ template< class TContainerType > void SetFlag( const Flags& rFlag, const bool& rFlagValue, TContainerType& rContainer ) { KRATOS_TRY const auto it_cont_begin = rContainer.begin(); #pragma omp parallel for for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) { auto it_cont = it_cont_begin + k; it_cont->Set(rFlag, rFlagValue); } KRATOS_CATCH("") } /** * @brief Flips a flag over a given container * @param rFlag flag to be set * @param rContainer Reference to the objective container */ template< class TContainerType > void ResetFlag( const Flags& rFlag, TContainerType& rContainer ) { KRATOS_TRY const auto it_cont_begin = rContainer.begin(); #pragma omp parallel for for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) { auto it_cont = it_cont_begin + k; it_cont->Reset(rFlag); } KRATOS_CATCH("") } /** * @brief Flips a flag over a given container * @param rFlag flag to be set * @param rContainer Reference to the objective container */ template< class TContainerType > void FlipFlag( const Flags& rFlag, TContainerType& rContainer ) { KRATOS_TRY const auto it_cont_begin = rContainer.begin(); #pragma omp parallel for for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) { auto it_cont = it_cont_begin + k; it_cont->Flip(rFlag); } KRATOS_CATCH("") } /** * @brief Takes the value of a non-historical vector variable and sets it in other variable * @param OriginVariable reference to the origin vector variable * @param SavedVariable reference to the destination vector variable * @param 
 * rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveVariable")
void SaveVectorVar(
    const ArrayVarType& OriginVariable,
    const ArrayVarType& SavedVariable,
    NodesContainerType& rNodes
    );

/**
 * @brief Takes the value of a non-historical scalar variable and sets it in other variable
 * @param OriginVariable reference to the origin scalar variable
 * @param SavedVariable reference to the destination scalar variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveVariable")
void SaveScalarVar(
    const DoubleVarType& OriginVariable,
    const DoubleVarType& SavedVariable,
    NodesContainerType& rNodes
    );

/**
 * @brief Takes the value of a non-historical variable and saves it in another variable
 * For a nodal container, this takes the value of a non-historical variable and saves it in another one
 * NOTE(review): the implementation below reads the HISTORICAL value
 * (FastGetSolutionStepValue) and stores it in the non-historical container —
 * the brief above looks inverted; confirm against callers.
 * @tparam TDataType The variable data type
 * @tparam Variable<TDataType> The variable type
 * @param rOriginVariable Reference to the origin variable
 * @param rSavedVariable Reference to the destination variable
 * @param rNodesContainer Reference to the nodal container
 */
template< class TDataType, class TVariableType = Variable<TDataType> >
void SaveVariable(
    const TVariableType &rOriginVariable,
    const TVariableType &rSavedVariable,
    NodesContainerType &rNodesContainer)
{
    KRATOS_TRY

    #pragma omp parallel for
    for (int i_node = 0; i_node < static_cast<int>(rNodesContainer.size()); ++i_node) {
        auto it_node = rNodesContainer.begin() + i_node;
        it_node->SetValue(rSavedVariable, it_node->FastGetSolutionStepValue(rOriginVariable));
    }

    KRATOS_CATCH("")
}

/**
 * @brief Takes the value of a non-historical vector variable and sets it in other non-historical variable
 * @param OriginVariable reference to the origin vector variable
 * @param SavedVariable reference to the destination vector variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveNonHistoricalVariable")
void SaveVectorNonHistoricalVar(
    const ArrayVarType& OriginVariable,
    const ArrayVarType& SavedVariable,
    NodesContainerType& rNodes
    );

/**
 * @brief Takes the value of a non-historical scalar variable and sets it in other non-historical variable
 * @param OriginVariable reference to the origin scalar variable
 * @param SavedVariable reference to the destination scalar variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveNonHistoricalVariable")
void SaveScalarNonHistoricalVar(
    const DoubleVarType& OriginVariable,
    const DoubleVarType& SavedVariable,
    NodesContainerType& rNodes
    );

/**
 * @brief Takes the value of a non-historical variable and saves it in another historical variable
 * For a non-nodal container, this method takes the value of an origin variable and saves it in a destination one
 * @tparam TDataType The variable data type
 * @tparam TContainerType The container type
 * @tparam Variable<TDataType> The variable type
 * @param rOriginVariable Reference to the origin variable
 * @param rSavedVariable Reference to the destination variable
 * @param rContainer Reference to the container of interest
 */
template< class TDataType, class TContainerType, class TVariableType = Variable<TDataType> >
void SaveNonHistoricalVariable(
    const TVariableType &rOriginVariable,
    const TVariableType &rSavedVariable,
    TContainerType &rContainer
    )
{
    KRATOS_TRY

    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(rContainer.size()); ++i) {
        auto it_cont = rContainer.begin() + i;
        it_cont->SetValue(rSavedVariable, it_cont->GetValue(rOriginVariable));
    }

    KRATOS_CATCH("")
}

/**
 * @brief Takes the value of an historical vector variable and sets it in other variable
 * @param OriginVariable reference to the origin vector variable
 * @param DestinationVariable reference to the destination vector variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable") void CopyVectorVar( const ArrayVarType& OriginVariable, const ArrayVarType& DestinationVariable, NodesContainerType& rNodes ); /** * @brief Takes the value of an historical double variable and sets it in other variable * @param OriginVariable reference to the origin double variable * @param DestinationVariable reference to the destination double variable * @param rNodes reference to the objective node set */ KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable") void CopyScalarVar( const DoubleVarType &OriginVariable, const DoubleVarType &DestinationVariable, NodesContainerType &rNodes); /** * @brief Takes the value of an historical variable and sets it in another variable * This function takes the value of an historical variable and sets in another * variable in all the nodes of the provided container. * @tparam TDataType The variable data type * @tparam Variable<TDataType> The variable type * @param rOriginVariable Reference to the origin variable * @param rDestinationVariable Reference to the destination variable * @param rNodesContainer Reference to the nodes container */ template< class TDataType, class TVariableType = Variable<TDataType> > void CopyVariable( const TVariableType &rOriginVariable, const TVariableType &rDestinationVariable, NodesContainerType &rNodesContainer) { KRATOS_TRY #pragma omp parallel for for (int i_node = 0; i_node < static_cast<int>(rNodesContainer.size()); ++i_node) { auto it_node = rNodesContainer.begin() + i_node; it_node->FastGetSolutionStepValue(rDestinationVariable) = it_node->FastGetSolutionStepValue(rOriginVariable); } KRATOS_CATCH("") } /** * @brief Returns a list of nodes filtered using the given double variable and value * @param Variable reference to the double variable to be filtered * @param Value Filtering Value * @param rOriginNodes Reference to the objective node set * @return selected_nodes: List of filtered nodes */ 
NodesContainerType SelectNodeList(
    const DoubleVarType& Variable,
    const double Value,
    const NodesContainerType& rOriginNodes
    );

/**
 * @brief Checks if all the nodes of a node set has the specified variable
 * @param rVariable reference to a variable to be checked
 * @param rNodes reference to the nodes set to be checked
 * @return 0: if succeeds, return 0
 */
template<class TVarType>
int CheckVariableExists(
    const TVarType& rVariable,
    const NodesContainerType& rNodes
    )
{
    KRATOS_TRY

    // KRATOS_CHECK_VARIABLE_IN_NODAL_DATA throws on the first missing variable.
    for (auto& i_node : rNodes)
        KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(rVariable, i_node);

    return 0;

    KRATOS_CATCH("");
}

/**
 * @brief Fixes or frees a variable for all of the nodes in the list. The dof has to exist.
 * @param rVar reference to the variable to be fixed or freed
 * @param IsFixed if true fixes, if false frees
 * @param rNodes reference to the nodes set to be fixed or freed
 */
template< class TVarType >
void ApplyFixity(
    const TVarType& rVar,
    const bool IsFixed,
    NodesContainerType& rNodes
    )
{
    KRATOS_TRY

    if (rNodes.size() != 0) {
        // checking the first node to avoid error being thrown in parallel region
        KRATOS_ERROR_IF_NOT(rNodes.begin()->HasDofFor(rVar))
            << "Trying to fix/free dof of variable " << rVar.Name()
            << " but this dof does not exist in node #"
            << rNodes.begin()->Id() << "!" << std::endl;

        // In debug, validate every node up front (throwing inside the parallel
        // loop below would be unsafe).
        #ifdef KRATOS_DEBUG
        for (const auto& r_node : rNodes) {
            KRATOS_ERROR_IF_NOT(r_node.HasDofFor(rVar))
                << "Trying to fix/free dof of variable " << rVar.Name()
                << " but this dof does not exist in node #" << r_node.Id() << "!"
                << std::endl;
        }
        #endif

        CheckVariableExists(rVar, rNodes);

        if (IsFixed) {
            #pragma omp parallel for
            for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
                NodesContainerType::iterator it_node = rNodes.begin() + k;
                it_node->pGetDof(rVar)->FixDof();
            }
        } else {
            #pragma omp parallel for
            for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
                NodesContainerType::iterator it_node = rNodes.begin() + k;
                it_node->pGetDof(rVar)->FreeDof();
            }
        }
    }

    KRATOS_CATCH("")
}

/**
 * @brief Fixes/Frees dofs based on a flag
 *
 * This method fixes/frees given rVariable, if rFlag matches CheckValue provided for that
 * specific node.
 *
 * @tparam TVarType Variable type
 * @param rVariable Variable to be fixed or freed
 * @param IsFixed True to fix variable, false to free variable
 * @param rNodes Nodes container
 * @param rFlag Flag to be checked to fix or free
 * @param CheckValue Flag value which is checked against
 */
template< class TVarType >
void ApplyFixity(
    const TVarType& rVariable,
    const bool IsFixed,
    NodesContainerType& rNodes,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    KRATOS_TRY

    if (rNodes.size() != 0) {
        // checking the first node to avoid error being thrown in parallel region
        KRATOS_ERROR_IF_NOT(rNodes.begin()->HasDofFor(rVariable))
            << "Trying to fix/free dof of variable " << rVariable.Name()
            << " but this dof does not exist in node #"
            << rNodes.begin()->Id() << "!" << std::endl;

        // In debug, validate every node up front (throwing inside the parallel
        // loops below would be unsafe).
        #ifdef KRATOS_DEBUG
        for (const auto& r_node : rNodes) {
            KRATOS_ERROR_IF_NOT(r_node.HasDofFor(rVariable))
                << "Trying to fix/free dof of variable " << rVariable.Name()
                << " but this dof does not exist in node #" << r_node.Id() << "!"
                << std::endl;
        }
        #endif

        CheckVariableExists(rVariable, rNodes);

        if (IsFixed) {
            BlockPartition<NodesContainerType>(rNodes).for_each(
                [&rVariable, &rFlag, CheckValue](NodeType& rNode) {
                    if (rNode.Is(rFlag) == CheckValue) {
                        rNode.pGetDof(rVariable)->FixDof();
                    }
                });
        } else {
            BlockPartition<NodesContainerType>(rNodes).for_each(
                [&rVariable, &rFlag, CheckValue](NodeType& rNode) {
                    if (rNode.Is(rFlag) == CheckValue) {
                        rNode.pGetDof(rVariable)->FreeDof();
                    }
                });
        }
    }

    KRATOS_CATCH("");
}

/**
 * @brief Loops along a vector data to set its values to the nodes contained in a node set.
 * @note This function is suitable for scalar historical variables, since each
 * one of the values in the data vector is set to its correspondent node. Besides,
 * the values must be sorted as the nodes are (value i corresponds to node i).
 * @param rVar reference to the variable to be set
 * @param rData rData vector. Note that its length must equal the number of nodes
 * @param rNodes reference to the nodes set to be set
 */
template< class TVarType >
void ApplyVector(
    const TVarType& rVar,
    const Vector& rData,
    NodesContainerType& rNodes
    )
{
    KRATOS_TRY

    if(rNodes.size() != 0 && rNodes.size() == rData.size()) {
        // First we do a check
        CheckVariableExists(rVar, rNodes);

        #pragma omp parallel for
        for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
            NodesContainerType::iterator it_node = rNodes.begin() + k;
            it_node->FastGetSolutionStepValue(rVar) = rData[k];
        }
    } else
        KRATOS_ERROR << "There is a mismatch between the size of data array and the number of nodes ";

    KRATOS_CATCH("")
}

/**
 * @brief Returns the nodal value summation of a non-historical vector variable.
 * @param rVar reference to the vector variable to summed
 * @param rModelPart reference to the model part that contains the objective node set
 * @return sum_value: summation vector result
 */
array_1d<double, 3> SumNonHistoricalNodeVectorVariable(
    const ArrayVarType& rVar,
    const ModelPart& rModelPart
    );

/**
 * @brief Returns the nodal value summation of a non-historical scalar variable.
 * @param rVar reference to the scalar variable to be summed
 * @param rModelPart reference to the model part that contains the objective node set
 * @return sum_value: summation result
 */
template< class TVarType >
double SumNonHistoricalNodeScalarVariable(
    const TVarType& rVar,
    const ModelPart& rModelPart
    )
{
    KRATOS_TRY

    double sum_value = 0.0;

    // Getting info — only local-mesh nodes are summed; interface/ghost nodes
    // are accounted for by the final MPI reduction.
    const auto& r_communicator = rModelPart.GetCommunicator();
    const auto& r_local_mesh = r_communicator.LocalMesh();
    const auto& r_nodes_array = r_local_mesh.Nodes();
    const auto it_node_begin = r_nodes_array.begin();

    #pragma omp parallel for reduction(+:sum_value)
    for (int k = 0; k < static_cast<int>(r_nodes_array.size()); ++k) {
        const auto it_node = it_node_begin + k;
        sum_value += it_node->GetValue(rVar);
    }

    // Cross-rank reduction of the thread-reduced local sum.
    return r_communicator.GetDataCommunicator().SumAll(sum_value);

    KRATOS_CATCH("")
}

/**
 * @brief This method accumulates and return a variable value
 * For a nodal historical variable, this method accumulates and
 * returns the summation in a model part.
 * @tparam TDataType Variable datatype
 * @tparam Variable<TDataType> Variable type
 * @param rVariable Nodal historical variable to be accumulated
 * @param rModelPart Model part in where the summation is done
 * @param BuffStep Buffer position
 * @return TDataType Value of the summation
 */
template< class TDataType, class TVarType = Variable<TDataType> >
TDataType SumHistoricalVariable(
    const TVarType &rVariable,
    const ModelPart &rModelPart,
    const unsigned int BuffStep = 0
    )
{
    KRATOS_TRY

    TDataType sum_value;
    AuxiliaryInitializeValue(sum_value);

    const auto &r_communicator = rModelPart.GetCommunicator();
    const int n_nodes = r_communicator.LocalMesh().NumberOfNodes();

    // Manual per-thread reduction: OpenMP reduction clauses do not support
    // the generic TDataType (e.g. array_1d), so each thread accumulates a
    // private value that is atomically merged at the end.
    #pragma omp parallel firstprivate(n_nodes)
    {
        TDataType private_sum_value;
        AuxiliaryInitializeValue(private_sum_value);

        #pragma omp for
        for (int i_node = 0; i_node < n_nodes; ++i_node) {
            const auto it_node = r_communicator.LocalMesh().NodesBegin() + i_node;
            private_sum_value += it_node->GetSolutionStepValue(rVariable, BuffStep);
        }

        AuxiliaryAtomicAdd(private_sum_value, sum_value);
    }

    // Cross-rank reduction of the local sum.
    return r_communicator.GetDataCommunicator().SumAll(sum_value);

    KRATOS_CATCH("")
}

/**
 * @brief Returns the condition value summation of a historical vector variable
 * @param rVar reference to the vector variable to be summed
 * @param rModelPart reference to the model part that contains the objective condition set
 * @return sum_value: summation result
 */
array_1d<double, 3> SumConditionVectorVariable(
    const ArrayVarType& rVar,
    const ModelPart& rModelPart
    );

/**
 * @brief Returns the condition value summation of a historical scalar variable
 * @param rVar reference to the scalar variable to be summed
 * @param rModelPart reference to the model part that contains the objective condition set
 * @return sum_value: summation result
 */
template< class TVarType >
double SumConditionScalarVariable(
    const TVarType& rVar,
    const ModelPart& rModelPart
    )
{
    KRATOS_TRY

    double sum_value = 0.0;

    // Getting info — only local-mesh conditions are summed.
    const auto& r_communicator = rModelPart.GetCommunicator();
    const auto& r_local_mesh = r_communicator.LocalMesh();
    const auto& r_conditions_array = r_local_mesh.Conditions();
    const auto it_cond_begin = r_conditions_array.begin();

    #pragma omp parallel for reduction(+:sum_value)
    for (int k = 0; k < static_cast<int>(r_conditions_array.size()); ++k) {
        const auto it_cond = it_cond_begin + k;
        sum_value += it_cond->GetValue(rVar);
    }

    return r_communicator.GetDataCommunicator().SumAll(sum_value);

    KRATOS_CATCH("")
}

/**
 * @brief Returns the element value summation of a historical vector variable
 * @param rVar reference to the vector variable to be summed
 * @param rModelPart reference to the model part that contains the objective element set
 * @return sum_value: summation result
 */
array_1d<double, 3> SumElementVectorVariable(
    const ArrayVarType& rVar,
    const ModelPart& rModelPart
    );

/**
 * @brief Returns the element value summation of a historical scalar variable
 * @param rVar reference to the scalar variable to be summed
 * @param rModelPart reference to the model part that contains the objective element set
 * @return sum_value: summation result
 */
template< class TVarType >
double SumElementScalarVariable(
    const TVarType& rVar,
    const ModelPart& rModelPart
    )
{
    KRATOS_TRY

    double sum_value = 0.0;

    // Getting info — only local-mesh elements are summed.
    const auto& r_communicator = rModelPart.GetCommunicator();
    const auto& r_local_mesh = r_communicator.LocalMesh();
    const auto& r_elements_array = r_local_mesh.Elements();
    const auto it_elem_begin = r_elements_array.begin();

    #pragma omp parallel for reduction(+:sum_value)
    for (int k = 0; k < static_cast<int>(r_elements_array.size()); ++k) {
        const auto it_elem = it_elem_begin + k;
        sum_value += it_elem->GetValue(rVar);
    }

    return r_communicator.GetDataCommunicator().SumAll(sum_value);

    KRATOS_CATCH("")
}

/**
 * @brief This function add dofs to the nodes in a model part.
 * It is useful since addition is done in parallel
 * @param rVar The variable to be added as DoF
 * @param rModelPart reference to the model part that contains the objective element set
 */
template< class TVarType >
void AddDof(
    const TVarType& rVar,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    // First we do a check (only the first node, assuming a homogeneous
    // solution-step variable list across nodes).
    // NOTE(review): the message literal below is missing a space before
    // "not included" — fixing it would change runtime output, left as is.
    if(rModelPart.NumberOfNodes() != 0)
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rVar))
            << "ERROR:: Variable : " << rVar
            << "not included in the Solution step data ";

    // Register the dof in the variables list so newly created nodes get it too.
    rModelPart.GetNodalSolutionStepVariablesList().AddDof(&rVar);

    #pragma omp parallel for
    for (int k = 0; k < static_cast<int>(rModelPart.NumberOfNodes()); ++k) {
        auto it_node = rModelPart.NodesBegin() + k;
        it_node->AddDof(rVar);
    }

    KRATOS_CATCH("")
}

/**
 * @brief This function add dofs to the nodes in a model part. It is useful since addition is done in parallel
 * @param rVar The variable to be added as DoF
 * @param rReactionVar The corresponding reaction to the added DoF
 * @param rModelPart reference to the model part that contains the objective element set
 */
template< class TVarType >
void AddDofWithReaction(
    const TVarType& rVar,
    const TVarType& rReactionVar,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    // NOTE(review): "Soluttion" typo and missing space before "not included"
    // in the literals below — changing them would change runtime output.
    if(rModelPart.NumberOfNodes() != 0) {
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rVar))
            << "ERROR:: DoF Variable : " << rVar
            << "not included in the Soluttion step data ";
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rReactionVar))
            << "ERROR:: Reaction Variable : " << rReactionVar
            << "not included in the Soluttion step data ";
    }

    // If in debug we do a check for all nodes
    #ifdef KRATOS_DEBUG
    CheckVariableExists(rVar, rModelPart.Nodes());
    CheckVariableExists(rReactionVar, rModelPart.Nodes());
    #endif

    // Register the dof/reaction pair so newly created nodes get it too.
    rModelPart.GetNodalSolutionStepVariablesList().AddDof(&rVar, &rReactionVar);

    #pragma omp parallel for
    for (int k = 0; k < static_cast<int>(rModelPart.NumberOfNodes()); ++k) {
        auto it_node = rModelPart.NodesBegin() + k;
        it_node->AddDof(rVar,rReactionVar);
    }

    KRATOS_CATCH("")
}

/**
 * @brief This method checks the variable keys
 * @return True if all the keys are correct
 */
bool CheckVariableKeys();

/**
 * @brief This method updates the current nodal coordinates back to the initial coordinates
 * @param rNodes the nodes to be updated
 */
void UpdateCurrentToInitialConfiguration(const ModelPart::NodesContainerType& rNodes);

/**
 * @param rNodes the nodes to be updated
 * @brief This method updates the initial nodal coordinates to the current coordinates
 */
void UpdateInitialToCurrentConfiguration(const ModelPart::NodesContainerType& rNodes);

/**
 * @brief This method updates the current coordinates
 * For each node, this method takes the value of the provided variable and updates the
 * current position as the initial position (X0, Y0, Z0) plus such variable value
 * @param rNodes
 * @param rUpdateVariable variable to retrieve the updating values from
 */
void UpdateCurrentPosition(
    const ModelPart::NodesContainerType& rNodes,
    const ArrayVarType& rUpdateVariable = DISPLACEMENT,
    const IndexType BufferPosition = 0
    );

///@}
///@name Access
///@{

///@}
///@name Inquiry
///@{

///@}
///@name Friends
///@{

///@}
private:
///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

/**
 * @brief Auxiliary double initialize method
 * Auxiliary method to initialize a double value
 * @param rValue Variable to initialize
 */
void AuxiliaryInitializeValue(double &rValue);

/**
 * @brief Auxiliary array initialize method
 * Auxiliary method to initialize an array value
 * @param rValue Variable to initialize
 */
void AuxiliaryInitializeValue(array_1d<double,3> &rValue);

/**
 * @brief Auxiliary scalar reduce method
 * Auxiliary method to perform the reduction of a scalar value
 * @param rPrivateValue Private variable to reduce
 * @param rSumValue Variable to save the reduction
 */
void AuxiliaryAtomicAdd(
    const double
&rPrivateValue, double &rSumValue ); /** * @brief Auxiliary array reduce method * Auxiliary method to perform the reduction of an array value * @param rPrivateValue Private variable to reduce * @param rSumValue Variable to save the reduction */ void AuxiliaryAtomicAdd( const array_1d<double,3> &rPrivateValue, array_1d<double,3> &rSumValue ); /** * @brief This is auxiliar method to check the keys * @return True if all the keys are OK */ template< class TVarType > bool CheckVariableKeysHelper() { KRATOS_TRY for (const auto& var : KratosComponents< TVarType >::GetComponents()) { if (var.first == "NONE" || var.first == "") std::cout << " var first is NONE or empty " << var.first << var.second << std::endl; if (var.second->Name() == "NONE" || var.second->Name() == "") std::cout << var.first << var.second << std::endl; if (var.first != var.second->Name()) //name of registration does not correspond to the var name std::cout << "Registration Name = " << var.first << " Variable Name = " << std::endl; } return true; KRATOS_CATCH("") } template <class TContainerType> TContainerType& GetContainer(ModelPart& rModelPart); template <class TContainerType> const TContainerType& GetContainer(const ModelPart& rModelPart); template <class TContainerType, class TSetterFunction, class TGetterFunction> void CopyModelPartFlaggedVariable( const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue, TSetterFunction&& rSetterFunction, TGetterFunction&& rGetterFunction) { KRATOS_TRY const auto& r_origin_container = GetContainer<TContainerType>(rOriginModelPart); auto& r_destination_container = GetContainer<TContainerType>(rDestinationModelPart); const int number_of_origin_items = r_origin_container.size(); const int number_of_destination_items = r_destination_container.size(); KRATOS_ERROR_IF_NOT(number_of_origin_items == number_of_destination_items) << "Origin ( " << rOriginModelPart.Name() << " ) and destination ( " << 
rDestinationModelPart.Name() << " ) model parts have different number of items." << "\n\t- Number of origin items: " << number_of_origin_items << "\n\t- Number of destination items: " << number_of_destination_items << std::endl; IndexPartition<int>(number_of_origin_items).for_each([&](int i_node) { const auto& r_orig_item = *(r_origin_container.begin() + i_node); auto& r_dest_item = *(r_destination_container.begin() + i_node); if (r_orig_item.Is(rFlag) == CheckValue) { rSetterFunction(r_dest_item, rGetterFunction(r_orig_item)); } }); KRATOS_CATCH(""); } ///@} ///@name Private Acces ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class VariableUtils */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_VARIABLE_UTILS defined */
5817.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp target teams distribute private(j) for (i = 1; i < _PB_NI - 1; ++i) { for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). 
*/ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
interpolate_v2_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/hostdevice.h"

namespace paddle {
namespace operators {

template <typename T, size_t D, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;

// Reads one int out of every 1-element shape tensor in the list (staging a
// copy on CPU first when the tensor lives on the GPU) and returns them as a
// plain vector. Each tensor must have shape [1].
inline std::vector<int> get_new_shape(
    const std::vector<const Tensor*>& list_new_shape_tensor) {
  std::vector<int> vec_new_shape;
  for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
    auto tensor = list_new_shape_tensor[i];
    PADDLE_ENFORCE_EQ(
        tensor->dims(), framework::make_ddim({1}),
        platform::errors::InvalidArgument("shape of dim tensor should be [1]"));
    if (platform::is_gpu_place(tensor->place())) {
      framework::Tensor temp;
      TensorCopySync(*tensor, platform::CPUPlace(), &temp);
      vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
    } else {
      vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
    }
  }
  return vec_new_shape;
}

// Copies the full contents of a tensor into a std::vector<T>, staging through
// a CPU tensor when the source lives on the GPU.
template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
  std::vector<T> vec_new_data;
  auto* new_data = new_data_tensor->data<T>();
  framework::Tensor cpu_starts_tensor;
  if (platform::is_gpu_place(new_data_tensor->place())) {
    TensorCopySync(*new_data_tensor, platform::CPUPlace(),
                   &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
  vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel());
  return vec_new_data;
}

// Decomposes a 3-D/4-D/5-D tensor shape into batch (N), channels (C) and the
// spatial extents (D, H, W), honoring NCHW vs NHWC layout. Missing spatial
// dimensions are reported as 1.
inline void ExtractNCDWH(const framework::DDim& dims,
                         const DataLayout& data_layout, int* N, int* C, int* D,
                         int* H, int* W) {
  *N = dims[0];

  if (dims.size() == 3) {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[2];
    *D = 1;
    *H = 1;
    *W = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
  } else if (dims.size() == 4) {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[3];
    *D = 1;
    *H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *W = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
  } else {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[4];
    *D = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *H = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
    *W = data_layout == DataLayout::kNCHW ? dims[4] : dims[3];
  }
}

// Nearest-neighbor 2-D interpolation: each output pixel copies the input
// pixel at ratio * output index (rounded when align_corners is set).
template <typename T>
static void NearestNeighborInterpolate(const Tensor& input, Tensor* output,
                                       const float ratio_h, const float ratio_w,
                                       const int n, const int c,
                                       const int out_h, const int out_w,
                                       const bool align_corners,
                                       const DataLayout& data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  for (int k = 0; k < out_h; k++) {  // loop for images
    int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
                               : static_cast<int>(ratio_h * k);

    for (int l = 0; l < out_w; l++) {
      int in_l = (align_corners) ?
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = input_t(i, j, in_k, in_l); } else { output_t(i, k, l, j) = input_t(i, in_k, in_l, j); } } } } } } template <typename T> static void LinearInterpolation(const Tensor& input, Tensor* output, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 3>::From(input); auto output_t = EigenTensor<T, 3>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(3) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int l = 0; l < out_w; l++) { // linear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vx_w[l]) * vd_e[l] + input_t(i, j, vx_e[l]) * vd_w[l]; output_t(i, j, l) = out_t; } else { out_t = input_t(i, vx_w[l], j) * vd_e[l] + input_t(i, vx_e[l], j) * vd_w[l]; output_t(i, l, j) = out_t; } } } } } template <typename T> static void LinearInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 3>::From(*input_grad); auto output_grad_t = EigenTensor<T, 3>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // linear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, l); input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e); input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w); } else { const T grad = output_grad_t(i, l, j); input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e); input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w); } } } } } template <typename T> static void BilinearInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? 
static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(4) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int k = 0; k < out_h; k++) { // loop for images for (int l = 0; l < out_w; l++) { // bilinear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] + input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] + input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] + input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l]; output_t(i, j, k, l) = out_t; } else { out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] + input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] + input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] + input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l]; output_t(i, k, l, j) = out_t; } } } } } } template <typename T> static void TrilinearInterpolation( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vt_f, vt_b; std::vector<float> vd_f, vd_b; vt_f.reserve(out_d); vt_b.reserve(out_d); vd_f.reserve(out_d); vd_b.reserve(out_d); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int j = 0; j 
< out_d; j++) { int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; { vt_f[j] = t_f; vt_b[j] = t_b; vd_f[j] = d_f; vd_b[j] = d_b; } } std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(5) #endif for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels for (int j = 0; j < out_d; j++) { // loop for D, H, W for (int k = 0; k < out_h; k++) { for (int l = 0; l < out_w; l++) { // trilinear interpolation if (data_layout == DataLayout::kNCHW) { T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, i, j, k, l) = out_t; } else { T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, j, k, l, i) = out_t; } } } } } } } template <typename T> HOSTDEVICE inline T cubic_convolution1(T x, T A) { return ((A + 2) * x - (A + 3)) * x * x + 1; } template <typename T> HOSTDEVICE inline T cubic_convolution2(T x, T A) { return ((A * x - 5 * A) * x + 8 * A) * x - 4 * 
A;
}

// Computes the four bicubic weights for fractional offset t (Keys' kernel,
// A = -0.75).
template <typename T>
HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) {
  T A = -0.75;

  T x1 = t;
  coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A);
  coeffs[1] = cubic_convolution1<T>(x1, A);

  // opposite coefficients
  T x2 = 1.0 - t;
  coeffs[2] = cubic_convolution1<T>(x2, A);
  coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A);
}

// Blends four samples with the bicubic weights for fractional offset t.
template <typename T>
static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) {
  T coeffs[4];
  get_cubic_upsample_coefficients<T>(coeffs, t);

  return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}

// Bicubic (2-D) interpolation: for each output pixel, interpolates 4 rows in
// the x direction and then blends them in the y direction. All accessed
// indices are clamped to the valid input range at the borders.
template <typename T>
static void BicubicInterpolation(const Tensor& input, Tensor* output,
                                 const float ratio_h, const float ratio_w,
                                 const int in_h, const int in_w, const int n,
                                 const int c, const int out_h, const int out_w,
                                 const bool align_corners,
                                 const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);

  for (int k = 0; k < out_h; k++) {  // loop for images
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    int input_y = floorf(y_n);
    const T y_t = y_n - input_y;

    for (int l = 0; l < out_w; l++) {
      T x_n = align_corners ? static_cast<T>(ratio_w * l)
                            : static_cast<T>(ratio_w * (l + 0.5) - 0.5);
      int input_x = floorf(x_n);
      const T x_t = x_n - input_x;

      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          T coefficients[4];
          // interp 4 times in x direction
          for (int ii = 0; ii < 4; ii++) {
            int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1),
                                    static_cast<int>(0));
            int access_x_0 =
                std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0));
            int access_x_1 =
                std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0));
            int access_x_2 =
                std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0));
            int access_x_3 =
                std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0));
            if (data_layout == DataLayout::kNCHW) {
              coefficients[ii] =
                  cubic_interp<T>(input_t(i, j, access_y, access_x_0),
                                  input_t(i, j, access_y, access_x_1),
                                  input_t(i, j, access_y, access_x_2),
                                  input_t(i, j, access_y, access_x_3), x_t);
            } else {
              coefficients[ii] =
                  cubic_interp<T>(input_t(i, access_y, access_x_0, j),
                                  input_t(i, access_y, access_x_1, j),
                                  input_t(i, access_y, access_x_2, j),
                                  input_t(i, access_y, access_x_3, j), x_t);
            }
          }

          // interp y direction
          if (data_layout == DataLayout::kNCHW) {
            output_t(i, j, k, l) =
                cubic_interp<T>(coefficients[0], coefficients[1],
                                coefficients[2], coefficients[3], y_t);
          } else {
            output_t(i, k, l, j) =
                cubic_interp<T>(coefficients[0], coefficients[1],
                                coefficients[2], coefficients[3], y_t);
          }
        }
      }
    }
  }
}

// Gradient of NearestNeighborInterpolate: routes each output gradient back to
// its single nearest source pixel.
template <typename T>
static void NearestNeighborInterpolateGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int n, const int c, const int out_h,
    const int out_w, const bool align_corners, const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);

  for (int k = 0; k < out_h; k++) {  // loop for images
    int in_k = (align_corners) ?
static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k);

    for (int l = 0; l < out_w; l++) {
      int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5)
                                 : static_cast<int>(ratio_w * l);

      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          if (data_layout == DataLayout::kNCHW) {
            input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l);
          } else {
            input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j);
          }
        }
      }
    }
  }
}

// Gradient of BilinearInterpolation: scatters each output gradient back to
// its four source pixels using the forward-pass weights.
template <typename T>
static void BilinearInterpolationGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int in_h, const int in_w, const int n,
    const int c, const int out_h, const int out_w, const bool align_corners,
    const int align_mode, const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
  bool align_flag = (align_mode == 0 && !align_corners);
  for (int k = 0; k < out_h; k++) {  // loop for images
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;

    for (int l = 0; l < out_w; l++) {
      int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                           : static_cast<int>(ratio_w * l);
      x_w = (x_w > 0) ? x_w : 0;
      int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
      float idx_src_x = ratio_w * (l + 0.5) - 0.5;
      idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
      float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
      float d_e = 1.f - d_w;

      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bilinear interpolation grad
          if (data_layout == DataLayout::kNCHW) {
            const T grad = output_grad_t(i, j, k, l);
            input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e);
            input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e);
            input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w);
            input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w);
          } else {
            const T grad = output_grad_t(i, k, l, j);
            input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e);
            input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e);
            input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w);
            input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w);
          }
        }
      }
    }
  }
}

// Gradient of TrilinearInterpolation: scatters each output gradient back to
// its eight source voxels using the forward-pass weights.
template <typename T>
static void TrilinearInterpolationGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_d,
    const float ratio_h, const float ratio_w, const int in_d, const int in_h,
    const int in_w, const int n, const int c, const int out_d, const int out_h,
    const int out_w, const bool align_corners, const int align_mode,
    const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 5>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 5>::From(output_grad);
  bool align_flag = (align_mode == 0 && !align_corners);
  for (int j = 0; j < out_d; j++) {  // loop for D
    int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * j);
    t_f = (t_f > 0) ? t_f : 0;
    int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
    float idx_src_t = ratio_d * (j + 0.5) - 0.5;
    idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
    float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
    float d_b = 1.f - d_f;

    for (int k = 0; k < out_h; k++) {  // loop for H
      int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                           : static_cast<int>(ratio_h * k);
      y_n = (y_n > 0) ? y_n : 0;
      int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
      float idx_src_y = ratio_h * (k + 0.5) - 0.5;
      idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
      float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
      float d_s = 1.f - d_n;

      for (int l = 0; l < out_w; l++) {  // loop for W
        int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                             : static_cast<int>(ratio_w * l);
        x_w = (x_w > 0) ? x_w : 0;
        int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
        float idx_src_x = ratio_w * (l + 0.5) - 0.5;
        idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
        float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
        float d_e = 1.f - d_w;

        for (int b = 0; b < n; b++) {    // loop for batches
          for (int i = 0; i < c; i++) {  // loop for channels
            // trilinear interpolation grad
            if (data_layout == DataLayout::kNCHW) {
              const T grad = output_grad_t(b, i, j, k, l);
              input_grad_t(b, i, t_f, y_n, x_w) +=
                  static_cast<T>(grad * d_b * d_s * d_e);
              input_grad_t(b, i, t_f, y_n, x_e) +=
                  static_cast<T>(grad * d_b * d_s * d_w);
              input_grad_t(b, i, t_f, y_s, x_w) +=
                  static_cast<T>(grad * d_b * d_n * d_e);
              input_grad_t(b, i, t_f, y_s, x_e) +=
                  static_cast<T>(grad * d_b * d_n * d_w);
              input_grad_t(b, i, t_b, y_n, x_w) +=
                  static_cast<T>(grad * d_f * d_s * d_e);
              input_grad_t(b, i, t_b, y_n, x_e) +=
                  static_cast<T>(grad * d_f * d_s * d_w);
              input_grad_t(b, i, t_b, y_s, x_w) +=
                  static_cast<T>(grad * d_f * d_n * d_e);
              input_grad_t(b, i, t_b, y_s, x_e) +=
                  static_cast<T>(grad * d_f * d_n * d_w);
            } else {
              const T grad = output_grad_t(b, j, k, l, i);
              input_grad_t(b, t_f, y_n, x_w, i) +=
                  static_cast<T>(grad * d_b * d_s * d_e);
              input_grad_t(b, t_f, y_n, x_e, i) +=
                  static_cast<T>(grad * d_b * d_s * d_w);
              input_grad_t(b, t_f, y_s, x_w, i) +=
                  static_cast<T>(grad * d_b * d_n * d_e);
              input_grad_t(b, t_f, y_s, x_e, i) +=
                  static_cast<T>(grad * d_b * d_n * d_w);
              input_grad_t(b, t_b, y_n, x_w, i) +=
                  static_cast<T>(grad * d_f * d_s * d_e);
              input_grad_t(b, t_b, y_n, x_e, i) +=
                  static_cast<T>(grad * d_f * d_s * d_w);
              input_grad_t(b, t_b, y_s, x_w, i) +=
                  static_cast<T>(grad * d_f * d_n * d_e);
              input_grad_t(b, t_b, y_s, x_e, i) +=
                  static_cast<T>(grad * d_f * d_n * d_w);
            }
          }
        }
      }
    }
  }
}

// Gradient of BicubicInterpolation: scatters each output gradient over the
// 4x4 neighborhood with the product of the x and y bicubic weights. Indices
// are clamped at the borders (so border pixels accumulate more terms).
template <typename T>
static void BicubicInterpolationGrad(const Tensor& output_grad,
                                     Tensor* input_grad, const float ratio_h,
                                     const float ratio_w, const int in_h,
                                     const int in_w, const int n, const int c,
                                     const int out_h, const int out_w,
                                     const bool align_corners,
                                     const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);

  for (int k = 0; k < out_h; k++) {  // loop for images
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    int input_y = floorf(y_n);
    T y_t = y_n - input_y;

    for (int l = 0; l < out_w; l++) {
      T x_n = align_corners ? static_cast<T>(ratio_w * l)
                            : static_cast<T>(ratio_w * (l + 0.5) - 0.5);
      int input_x = floorf(x_n);
      T x_t = x_n - input_x;

      T x_coeffs[4];
      T y_coeffs[4];

      get_cubic_upsample_coefficients<T>(x_coeffs, x_t);
      get_cubic_upsample_coefficients<T>(y_coeffs, y_t);

      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bicubic interpolation grad
          for (int ii = 0; ii < 4; ii++) {
            for (int jj = 0; jj < 4; jj++) {
              int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1),
                                      static_cast<int>(0));
              int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1),
                                      static_cast<int>(0));
              if (data_layout == DataLayout::kNCHW) {
                T grad = output_grad_t(i, j, k, l);
                input_grad_t(i, j, access_y, access_x) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              } else {
                T grad = output_grad_t(i, k, l, j);
                input_grad_t(i, access_y, access_x, j) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              }
            }
          }
        }
      }
    }
  }
}

// CPU forward for 1-D ("linear") interpolation: resolves the output width
// from SizeTensor / Scale / attributes, allocates the output and dispatches.
template <typename T>
static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } else { float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ(scale_w > 0, true, platform::errors::InvalidArgument( "scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ(scale_w > 0, true, platform::errors::InvalidArgument( "scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_w > 0.) { out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("linear" == interp_method) { LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_h > 0. && scale_w > 0.) 
{ out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("bilinear" == interp_method) { BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { float scale_d = -1; float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0 && scale_d, true, 
platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0 && scale_d, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_w > 0. && scale_h > 0. && scale_d > 0.) { out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("trilinear" == interp_method) { TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale_w = -1.0; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ(scale_w > 0, true, platform::errors::InvalidArgument( "scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ(scale_w > 0, true, platform::errors::InvalidArgument( "scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_w > 0.) 
{ out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("linear" == interp_method) { LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if 
(scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_w = scale_data[0]; scale_h = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("bilinear" == interp_method) { BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0 && scale_d > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = 
scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0 && scale_d > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_d > 0. && scale_h > 0. && scale_w > 0.) { out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("trilinear" == interp_method) { TrilinearInterpolationGrad<T>( output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } } template <typename T> class InterpolateV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCPUFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation grad Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation grad Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation grad Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle
run_instance.h
#include <utility> #ifndef TEST_RUN_INSTANCE_H #define TEST_RUN_INSTANCE_H #include <string> #include <memory> #include <vector> #include <experimental/filesystem> #include <algorithm> #include <fstream> #include <iterator> #include <unordered_map> #include <opencv2/highgui/highgui.hpp> #include <opencv2/core/core.hpp> #include <bits/unordered_map.h> using namespace std::experimental::filesystem; std::unordered_map<std::string, std::vector<cv::Rect>> groundtruth; void read_all_groundtruth(const std::string &path_to_dirs){ std::vector<int> f_c; std::string str; std::string substr; std::vector<path> dirs; for(auto& p : directory_iterator(path_to_dirs)) { if (p.status().type() == file_type::directory) { dirs.push_back(p.path()); } } printf("start to read gt files\n"); for (int i=0; i<dirs.size(); ++i){ printf("%d of %lu\n", i+1, dirs.size()); auto rects = std::vector<cv::Rect>(); std::fstream gt_file(dirs[i] / "groundtruth.txt"); while (std::getline(gt_file, str)){ f_c.clear(); std::stringstream substr_stream(str); while (std::getline(substr_stream, substr, ',')){ f_c.push_back(std::stoi(substr)); } cv::Rect box; if (f_c.size() > 4){ box.x = int(fmin(f_c[0], fmin(f_c[2], fmin(f_c[4], f_c[6])))); box.y = int(fmin(f_c[1], fmin(f_c[3], fmin(f_c[5], f_c[7])))); box.width = int(fmax(f_c[0], fmax(f_c[2], fmax(f_c[4], f_c[6])))) - box.x; box.height = int(fmax(f_c[1], fmax(f_c[3], fmax(f_c[5], f_c[7])))) - box.y; } else { box.x = int(f_c[0]); box.y = int(f_c[1]); box.width = int(f_c[2]); box.height = int(f_c[3]); } rects.push_back(box); } groundtruth[dirs[i].filename().string()] = rects; } } class Statistics{ public: Statistics (std::string path_to_dirs, const int pref) : path_to_dirs(std::move(path_to_dirs)){ dirs = new std::vector<path>(); bboxes_info = new std::vector<std::string>(); file_names = new std::vector<path>(); dirs = new std::vector<path>(); is_new_video = true; prefix = std::to_string(pref); gt_index = 0; if (dirs->empty()){ for(auto& p : 
directory_iterator(this->path_to_dirs)) { if (p.status().type() == file_type::directory) { dirs->push_back(p.path()); } } } create_directory(path_to_bboxes_dir / prefix); make_file_names(dirs->back()); } double giou(const cv::Rect& box1, const cv::Rect& box2){ double x1 = std::max(box1.x, box2.x); double y1 = std::max(box1.y, box2.y); double x2 = std::min(box1.x + box1.width, box2.x + box2.width); double y2 = std::min(box1.y + box1.height, box2.y + box2.height); double intersection = std::max(x2 - x1, 0.) * std::max(y2 - y1, 0.); double union_ = box1.width * box1.height + box2.width * box2.height - intersection; double area_1 = box1.height * box1.width; double area_2 = box2.height * box2.width; double x1_c = std::min(box1.x, box2.x); double y1_c = std::min(box1.y, box2.y); double x2_c = std::max(box1.x + box1.width, box2.x + box2.width); double y2_c = std::max(box1.y + box1.height, box2.y + box2.height); double area_c = (x2_c - x1_c) * (y2_c - y1_c); double giou = intersection / union_ - (area_c - union_) / area_c; return giou; } bool check_is_new_video(){ return is_new_video; } bool try_get_next_file(std::string& path_to_image){ is_new_video = false; if (file_names->empty()){ make_bboxes_file(dirs->back()); // printf("--- %s\n", dirs->back().c_str()); dirs->pop_back(); gt_index = 0; if (dirs->empty()){ return false; } make_file_names(dirs->back()); } if (bboxes_info->empty()) is_new_video = true; path_to_image = file_names->back().string(); file_names->pop_back(); return true; } void bboxes_to_file(const cv::Rect& tr_box, const double iou_value){ bboxes_info->push_back(std::to_string(iou_value)); } cv::Rect read_current_groundtruth(){ return groundtruth[dirs->back().filename().string()][gt_index++]; } protected: void make_bboxes_file(const path& dir){ if (bboxes_info->empty()) return; std::ofstream file(path_to_bboxes_dir / prefix / (dir.filename().string() + ".csv")); std::ostream_iterator<std::string> out_it (file,"\n"); std::copy ( bboxes_info->begin(), 
bboxes_info->end(), out_it ); file.flush(); file.close(); bboxes_info->clear(); } void make_file_names(const path &dir){ for (auto& file : directory_iterator(dir)){ if (file.status().type() == file_type::regular && file.path().extension() == ".jpg"){ file_names->push_back(file.path()); } } sort(file_names->begin(), file_names->end(), [](path a, path b) { return a > b; }); } private: int gt_index; bool is_new_video; std::string path_to_bboxes_dir = "../bboxes_info"; std::string prefix; std::vector<std::string>* bboxes_info; std::vector<path>* file_names; std::vector<path>* dirs; std::string path_to_dirs; }; #include "genetic_algorithm.h" #include "kcftracker.hpp" #include "kalman_filter.h" #include <chrono> #include <omp.h> using namespace std::chrono; typedef steady_clock timestamp; void run_statistics(genetic_alg::Population& population, const std::string& path_to_vids, int threads_amount = 8) { bool show = false; int frames_to_kalman = 50000; printf("start to run population\n"); omp_set_dynamic(0); omp_set_num_threads(threads_amount); #pragma omp parallel for for (int i= 0; i<population.people.size(); ++i){ if (population.people[i]->p != -1 and !population.people[i]->is_mutated()){ continue; } cv::Mat frame; cv::Rect result; double iou = 0; int kalman_counter = 0; auto T = timestamp::now(); printf("%d of %zu\n", population.people[i]->get_number(), population.people.size()); Statistics stat(path_to_vids, population.people[i]->get_number()); std::unique_ptr<KCFTracker> tracker; std::unique_ptr<Kalman> kalman; std::string file_path; while (stat.try_get_next_file(file_path)) { frame = cv::imread(file_path, CV_LOAD_IMAGE_COLOR); if (stat.check_is_new_video() || iou <= 0 || kalman_counter > frames_to_kalman) { auto coords = stat.read_current_groundtruth(); tracker = std::make_unique<KCFTracker>( true,false, true, false); tracker->init(coords, frame); kalman = std::make_unique<Kalman>(); // printf("\nperson #%d: ", population.people[i]->get_number()); // for (int j=0; 
j<genetic_alg::GENOME_LENGTH; ++j){ // printf("[%d]=%f ", j, population.people[i]->data[j]); // } kalman->set_from_genome(population.people[i]->data); if (show) rectangle(frame, cv::Point(coords.x, coords.y), cv::Point(coords.x + coords.width, coords.y + coords.height), cv::Scalar(0, 255, 0), 4, 8); iou = 1; kalman_counter = 0; } else { T = timestamp::now(); result = tracker->update(frame); if (show) rectangle(frame, cv::Point(result.x, result.y), cv::Point(result.x + result.width, result.y + result.height), cv::Scalar(0, 255, 255), 4, 8); result = kalman->predict( double(duration_cast<microseconds>(timestamp::now() - T).count()) / 1000'000., result); if (show) rectangle(frame, cv::Point(result.x, result.y), cv::Point(result.x + result.width, result.y + result.height), cv::Scalar(255, 0, 255), 4, 8); iou = stat.giou(result, stat.read_current_groundtruth()); ++kalman_counter; } stat.bboxes_to_file(result, iou); if (show){ imshow("Image", frame); // if (27 == cv::waitKey(3)) { // return; // } } } } // ----------------------------------------------------------------------------------------- printf("start selection\n"); for (auto& person : population.people){ person->count_fitness(); } double mean_sum = 0; for (auto& person : population.people){ mean_sum += person->fitness_value; } double mean = mean_sum / population.people.size(); double variance_delta_sum = 0; for (auto& person : population.people){ variance_delta_sum += (person->fitness_value - mean) * (person->fitness_value - mean); } double variance = variance_delta_sum / (population.people.size() - 1); double standart_derivation = sqrt(variance); double F_i_sum = 0; for (auto& person : population.people){ F_i_sum += person->count_F_i(standart_derivation, mean); } for (auto& person : population.people){ person->count_probability(F_i_sum); person->log_info(); } // ----------------------------------------------------------------------------------------- printf("start create new population\n"); 
population.create_new_popuation(); } #endif //TEST_RUN_INSTANCE_H
eff.c
#if (defined(_OPENMP) || defined(SPEC_OPENMP)) && !defined(SPEC_SUPPRESS_OPENMP) && !defined(SPEC_AUTO_SUPPRESS_OPENMP)
# undef OPENMP
# define OPENMP
#else
# undef OPENMP
#endif

/*
 * eff.c: implement energy subroutines for 3 or 4 cartesian coordinates.
 *
 * Parallelization via OpenMP, creation of pair lists using a kd tree, and
 * optional calculation in 4D were added by Russ Brown (russ.brown@sun.com)
 */

/*
 * For OpenMP execution, energy values can fluctuate across repeat
 * executions when large molecular models and large numbers of cores are
 * used.  This effect can be minimized if NOREDUCE is defined.
 */

/***********************************************************************
                            ECONS()
************************************************************************/

/*
 * Calculate the constrained (positional restraint) energy and first
 * derivatives.
 *
 * x - atomic coordinates, 'dim' (3 or 4) values per atom
 * f - gradient vector, accumulated in place
 *
 * Returns the restraint energy: sum of wcons * |x - x0|^2 over all
 * atoms flagged in the file-level 'constrained' array.
 */

static REAL_T econs(REAL_T * x, REAL_T * f)
{
   int i, foff, threadnum, numthreads;
   REAL_T e_cons, rx, ry, rz, rw;

   e_cons = 0.0;

   /* Parallel execution for OpenMP unless NOREDUCE or NOPAR is defined. */

#if !defined(NOREDUCE) && !defined(NOPAR) && (!defined(SPEC) || defined(OPENMP))
#pragma omp parallel reduction (+: e_cons) private (i, rx, ry, rz, rw)
#endif
   {

      /*
       * Choose the thread-to-iteration mapping and the gradient offset:
       *
       * - OpenMP without NOREDUCE: each thread accumulates into its own
       *   region of the f array, so offset by dim * Natom * threadnum.
       *
       * - Otherwise (NOREDUCE, which forces single-threaded updates of a
       *   shared f, or MPI/ScaLAPACK where each process owns a private
       *   copy of f): no offset; the task id and task count come from the
       *   mytaskid/numtasks variables set in sff.c.
       */

#if defined(OPENMP) && !defined(NOREDUCE)
      threadnum = omp_get_thread_num();
      numthreads = omp_get_num_threads();
      foff = dim * prm->Natom * threadnum;
#else
      threadnum = mytaskid;
      numthreads = numtasks;
      foff = 0;
#endif

      /*
       * Loop over all atoms.  Map loop indices onto OpenMP threads
       * or MPI tasks using (static, 1) scheduling.
       */

      for (i = threadnum; i < prm->Natom; i += numthreads) {
         if (constrained[i]) {
            rx = x[dim * i] - x0[dim * i];
            ry = x[dim * i + 1] - x0[dim * i + 1];
            rz = x[dim * i + 2] - x0[dim * i + 2];
            e_cons += wcons * (rx * rx + ry * ry + rz * rz);

            /* Gradient of wcons * r^2 is 2 * wcons * r per component. */
            f[foff + dim * i] += 2. * wcons * rx;
            f[foff + dim * i + 1] += 2. * wcons * ry;
            f[foff + dim * i + 2] += 2. * wcons * rz;
            if (dim == 4) {
               /* Fourth coordinate contributes only in 4D mode. */
               rw = x[dim * i + 3] - x0[dim * i + 3];
               e_cons += wcons * rw * rw;
               f[foff + dim * i + 3] += 2. * wcons * rw;
            }
         }
      }
   }
   return (e_cons);
}

/***********************************************************************
                            EBOND()
************************************************************************/

/*
 * Calculate the bond stretching energy and first derivatives.
 *
 * nbond - number of 1-2 bonds
 * a1,a2 - per-bond atom indices (in 3-coordinate units; rescaled by dim/3)
 * atype - per-bond 1-based force-field type index
 * Rk    - force constants, indexed by type
 * Req   - equilibrium bond lengths, indexed by type
 * x     - atomic coordinates
 * f     - gradient vector, accumulated in place
 *
 * Returns the total harmonic bond energy: sum of Rk * (r - Req)^2.
 */

static REAL_T ebond(int nbond, int *a1, int *a2, int *atype, REAL_T * Rk,
                    REAL_T * Req, REAL_T * x, REAL_T * f)
{
   int i, at1, at2, atyp, foff, threadnum, numthreads;
   REAL_T e_bond, r, rx, ry, rz, rw, r2, s, db, df, e;

   e_bond = 0.0;

   /* Parallel execution for OpenMP unless NOREDUCE or NOPAR is defined. */

#if !defined(NOREDUCE) && !defined(NOPAR) && (!defined(SPEC) || defined(OPENMP))
#pragma omp parallel reduction (+: e_bond) \
  private (i, foff, at1, at2, atyp, threadnum, numthreads, \
  rx, ry, rz, rw, r2, s, r, db, df, e )
#endif
   {

      /*
       * Thread/task mapping and gradient offset; see the comment in
       * econs() above: per-thread f regions for OpenMP without NOREDUCE,
       * otherwise a shared or per-process f with no offset.
       */

#if defined(OPENMP) && !defined(NOREDUCE)
      threadnum = omp_get_thread_num();
      numthreads = omp_get_num_threads();
      foff = dim * prm->Natom * threadnum;
#else
      threadnum = mytaskid;
      numthreads = numtasks;
      foff = 0;
#endif

      /*
       * Loop over all 1-2 bonds.  Map loop indices onto OpenMP threads
       * or MPI tasks using (static, 1) scheduling.
       */

      for (i = threadnum; i < nbond; i += numthreads) {
         /* Convert 3-coordinate atom offsets to dim-coordinate offsets. */
         at1 = dim * a1[i] / 3;
         at2 = dim * a2[i] / 3;
         atyp = atype[i] - 1;
         rx = x[at1] - x[at2];
         ry = x[at1 + 1] - x[at2 + 1];
         rz = x[at1 + 2] - x[at2 + 2];
         r2 = rx * rx + ry * ry + rz * rz;
         if (dim == 4) {
            rw = x[at1 + 3] - x[at2 + 3];
            r2 += rw * rw;
         }
         s = sqrt(r2);
         /* r folds the factor 2 of dE/dr = 2*Rk*db together with the
            1/|r| normalization of the direction vector. */
         r = 2.0 / s;
         db = s - Req[atyp];
         df = Rk[atyp] * db;
         e = df * db;
         e_bond += e;
         df *= r;
         f[foff + at1 + 0] += rx * df;
         f[foff + at1 + 1] += ry * df;
         f[foff + at1 + 2] += rz * df;
         f[foff + at2 + 0] -= rx * df;
         f[foff + at2 + 1] -= ry * df;
         f[foff + at2 + 2] -= rz * df;
         if (dim == 4) {
            f[foff + at1 + 3] += rw * df;
            f[foff + at2 + 3] -= rw * df;
         }
      }
   }
   return (e_bond);
}

/***********************************************************************
                            EANGL()
************************************************************************/

/*
 * Calculate the bond bending energy and first derivatives.
 *
 * nang     - number of 1-3 angles
 * a1,a2,a3 - per-angle atom indices (a2 is the central atom)
 * atype    - per-angle 1-based force-field type index
 * Tk       - angle force constants, indexed by type
 * Teq      - equilibrium angles (radians), indexed by type
 * x        - atomic coordinates
 * f        - gradient vector, accumulated in place
 *
 * Returns the total harmonic angle energy: sum of Tk * (theta - Teq)^2.
 */

static REAL_T eangl(int nang, int *a1, int *a2, int *a3, int *atype,
                    REAL_T * Tk, REAL_T * Teq, REAL_T * x, REAL_T * f)
{
   int i, atyp, at1, at2, at3, foff, threadnum, numthreads;
   REAL_T dxi, dyi, dzi, dwi, dxj, dyj, dzj, dwj, ri2, rj2, ri, rj, rir, rjr;
   REAL_T dxir, dyir, dzir, dwir, dxjr, dyjr, dzjr, dwjr, cst, at, da, df, e, e_theta;
   REAL_T xtmp, dxtmp, ytmp, wtmp, dytmp, ztmp, dztmp, dwtmp;

   e_theta = 0.0;

   /* Parallel execution for OpenMP unless NOREDUCE or NOPAR is defined. */

#if !defined(NOREDUCE) && !defined(NOPAR) && (!defined(SPEC) || defined(OPENMP))
#pragma omp parallel reduction (+: e_theta) \
  private (i, foff, at1, at2, at3, atyp, threadnum, numthreads, \
  dxi, dyi, dzi, dwi, dxj, dyj, dzj, dwj, ri2, rj2, ri, rj, rir, rjr, \
  dxir, dyir, dzir, dwir, dxjr, dyjr, dzjr, dwjr, cst, at, da, df, e, \
  xtmp, dxtmp, ytmp, dytmp, ztmp, dztmp, wtmp, dwtmp)
#endif
   {

      /*
       * Thread/task mapping and gradient offset; see the comment in
       * econs() above: per-thread f regions for OpenMP without NOREDUCE,
       * otherwise a shared or per-process f with no offset.
       */

#if defined(OPENMP) && !defined(NOREDUCE)
      threadnum = omp_get_thread_num();
      numthreads = omp_get_num_threads();
      foff = dim * prm->Natom * threadnum;
#else
      threadnum = mytaskid;
      numthreads = numtasks;
      foff = 0;
#endif

      /*
       * Loop over all 1-3 bonds.  Map loop indices onto OpenMP threads
       * or MPI tasks using (static, 1) scheduling.
       */

      for (i = threadnum; i < nang; i += numthreads) {
         /* Convert 3-coordinate atom offsets to dim-coordinate offsets. */
         at1 = dim * a1[i] / 3;
         at2 = dim * a2[i] / 3;
         at3 = dim * a3[i] / 3;
         atyp = atype[i] - 1;

         /* Bond vectors from the central atom (at2) to the outer atoms. */
         dxi = x[at1] - x[at2];
         dyi = x[at1 + 1] - x[at2 + 1];
         dzi = x[at1 + 2] - x[at2 + 2];
         dxj = x[at3] - x[at2];
         dyj = x[at3 + 1] - x[at2 + 1];
         dzj = x[at3 + 2] - x[at2 + 2];
         ri2 = dxi * dxi + dyi * dyi + dzi * dzi;
         rj2 = dxj * dxj + dyj * dyj + dzj * dzj;
         if (dim == 4) {
            dwi = x[at1 + 3] - x[at2 + 3];
            dwj = x[at3 + 3] - x[at2 + 3];
            ri2 += dwi * dwi;
            rj2 += dwj * dwj;
         }
         ri = sqrt(ri2);
         rj = sqrt(rj2);
         rir = 1. / ri;
         rjr = 1. / rj;

         /* Unit bond vectors; their dot product is cos(theta). */
         dxir = dxi * rir;
         dyir = dyi * rir;
         dzir = dzi * rir;
         dxjr = dxj * rjr;
         dyjr = dyj * rjr;
         dzjr = dzj * rjr;
         cst = dxir * dxjr + dyir * dyjr + dzir * dzjr;
         if (dim == 4) {
            dwir = dwi * rir;
            dwjr = dwj * rjr;
            cst += dwir * dwjr;
         }

         /* Clamp against rounding so acos() stays in domain. */
         if (cst > 1.0)
            cst = 1.0;
         if (cst < -1.0)
            cst = -1.0;
         at = acos(cst);
         da = at - Teq[atyp];
         df = da * Tk[atyp];
         e = df * da;
         e_theta = e_theta + e;
         df = df + df;
         at = sin(at);

         /* Clamp sin(theta) away from zero to avoid a divide blow-up
            for near-linear (or near-zero) angles. */
         if (at > 0 && at < 1.e-3)
            at = 1.e-3;
         else if (at < 0 && at > -1.e-3)
            at = -1.e-3;
         df = -df / at;

         /* Chain-rule terms: d(cos theta)/dx scaled by dE/d(theta). */
         xtmp = df * rir * (dxjr - cst * dxir);
         dxtmp = df * rjr * (dxir - cst * dxjr);
         ytmp = df * rir * (dyjr - cst * dyir);
         dytmp = df * rjr * (dyir - cst * dyjr);
         ztmp = df * rir * (dzjr - cst * dzir);
         dztmp = df * rjr * (dzir - cst * dzjr);
         f[foff + at1 + 0] += xtmp;
         f[foff + at3 + 0] += dxtmp;
         /* The central atom takes the opposite of both end forces. */
         f[foff + at2 + 0] -= xtmp + dxtmp;
         f[foff + at1 + 1] += ytmp;
         f[foff + at3 + 1] += dytmp;
         f[foff + at2 + 1] -= ytmp + dytmp;
         f[foff + at1 + 2] += ztmp;
         f[foff + at3 + 2] += dztmp;
         f[foff + at2 + 2] -= ztmp + dztmp;
         if (dim == 4) {
            wtmp = df * rir * (dwjr - cst * dwir);
            dwtmp = df * rjr * (dwir - cst * dwjr);
            f[foff + at1 + 3] += wtmp;
            f[foff + at3 + 3] += dwtmp;
            f[foff + at2 + 3] -= wtmp + dwtmp;
         }
      }
   }
   return (e_theta);
}

/***********************************************************************
                            EPHI()
************************************************************************/

/* Calculate the dihedral torsion energy and first derivatives.
*/

/*
 * ephi() parameters:
 *
 * nphi        - number of 1-4 (dihedral) terms
 * a1,a2,a3,a4 - per-dihedral atom indices; a3 < 0 flags an improper
 *               torsion (when prm->Nhparm is set), a4 < 0 is taken by
 *               absolute value
 * atype       - per-dihedral 1-based force-field type index
 * Pk          - torsion barrier heights, indexed by type
 * Pn          - periodicities; a negative value chains to the next
 *               type entry (multi-term torsion)
 * Phase       - phase angles (radians), indexed by type
 * x           - atomic coordinates
 * f           - gradient vector, accumulated in place
 *
 * Returns the total torsion energy.
 */

static REAL_T ephi(int nphi, int *a1, int *a2, int *a3, int *a4, int *atype,
                   REAL_T * Pk, REAL_T * Pn, REAL_T * Phase, REAL_T * x, REAL_T * f)
{
   REAL_T e, co, den, co1, uu, vv, uv, ax, bx, cx, ay, by, cy, az, bz, cz, aw, bw, cw;
   REAL_T a0x, b0x, c0x, a0y, b0y, c0y, a0z, b0z, c0z, a0w, b0w, c0w, a1x, b1x;
   REAL_T a1y, b1y, a1z, b1z, a1w, b1w, a2x, b2x, a2y, b2y, a2z, b2z, a2w, b2w;
   REAL_T dd1x, dd2x, dd3x, dd4x, dd1y, dd2y, dd3y, dd4y, dd1z, dd2z, dd3z, dd4z;
   REAL_T dd1w = 0.0, dd2w = 0.0, dd3w = 0.0, dd4w = 0.0;
   REAL_T df, aa, bb, cc, ab, bc, ac, cosq;
   REAL_T ktors, phase, e_tors;
   int i, at1, at2, at3, at4, atyp, foff, threadnum, numthreads;
   REAL_T ux, uy, uz, vx, vy, vz, delta, phi, dx1, dy1, dz1, yy, pi;

   /* NOTE(review): single-precision value of pi; kept as-is so that
      results stay bit-identical with the reference implementation. */
   pi = 3.1415927;
   e_tors = 0.0;

   /* Parallel execution for OpenMP unless NOREDUCE or NOPAR is defined. */

#if !defined(NOREDUCE) && !defined(NOPAR) && (!defined(SPEC) || defined(OPENMP))
#pragma omp parallel reduction (+: e_tors) \
  private (i, at1, at2, at3, at4, atyp, ax, ay, az, aw, bx, by, bz, bw, \
  cx, cy, cz, cw, ab, bc, ac, aa, bb, cc, uu, vv, uv, den, co, co1, \
  a0x, a0y, a0z, a0w, b0x, b0y, b0z, b0w, c0x, c0y, c0z, c0w, \
  a1x, a1y, a1z, a1w, b1x, b1y, b1z, b1w, a2x, a2y, a2z, a2w, \
  b2x, b2y, b2z, b2w, dd1x, dd1y, dd1z, dd1w, dd2x, dd2y, dd2z, dd2w, \
  dd3x, dd3y, dd3z, dd3w, dd4x, dd4y, dd4z, dd4w, phi, \
  ux, uy, uz, vx, vy, vz, dx1, dy1, dz1, delta, df, e, yy, phase, \
  ktors, cosq, threadnum, numthreads, foff)
#endif
   {

      /*
       * Thread/task mapping and gradient offset; see the comment in
       * econs() above: per-thread f regions for OpenMP without NOREDUCE,
       * otherwise a shared or per-process f with no offset.
       */

#if defined(OPENMP) && !defined(NOREDUCE)
      threadnum = omp_get_thread_num();
      numthreads = omp_get_num_threads();
      foff = dim * prm->Natom * threadnum;
#else
      threadnum = mytaskid;
      numthreads = numtasks;
      foff = 0;
#endif

      /*
       * Loop over all 1-4 bonds.  Map loop indices onto OpenMP threads
       * or MPI tasks using (static, 1) scheduling.
       */

      for (i = threadnum; i < nphi; i += numthreads) {
         /* Convert 3-coordinate atom offsets to dim-coordinate offsets;
            a3/a4 may carry sign flags, so take absolute values. */
         at1 = dim * a1[i] / 3;
         at2 = dim * a2[i] / 3;
         at3 = dim * abs(a3[i]) / 3;
         at4 = dim * abs(a4[i]) / 3;
         atyp = atype[i] - 1;

         /* The three successive bond vectors of the dihedral. */
         ax = x[at2 + 0] - x[at1 + 0];
         ay = x[at2 + 1] - x[at1 + 1];
         az = x[at2 + 2] - x[at1 + 2];
         bx = x[at3 + 0] - x[at2 + 0];
         by = x[at3 + 1] - x[at2 + 1];
         bz = x[at3 + 2] - x[at2 + 2];
         cx = x[at4 + 0] - x[at3 + 0];
         cy = x[at4 + 1] - x[at3 + 1];
         cz = x[at4 + 2] - x[at3 + 2];
         if (dim == 4) {
            aw = x[at2 + 3] - x[at1 + 3];
            bw = x[at3 + 3] - x[at2 + 3];
            cw = x[at4 + 3] - x[at3 + 3];

# define DOT4(a,b,c,d,e,f,g,h) a*e + b*f + c*g + d*h

            ab = DOT4(ax, ay, az, aw, bx, by, bz, bw);
            bc = DOT4(bx, by, bz, bw, cx, cy, cz, cw);
            ac = DOT4(ax, ay, az, aw, cx, cy, cz, cw);
            aa = DOT4(ax, ay, az, aw, ax, ay, az, aw);
            bb = DOT4(bx, by, bz, bw, bx, by, bz, bw);
            cc = DOT4(cx, cy, cz, cw, cx, cy, cz, cw);
         } else {

# define DOT3(a,b,c,d,e,f) a*d + b*e + c*f

            ab = DOT3(ax, ay, az, bx, by, bz);
            bc = DOT3(bx, by, bz, cx, cy, cz);
            ac = DOT3(ax, ay, az, cx, cy, cz);
            aa = DOT3(ax, ay, az, ax, ay, az);
            bb = DOT3(bx, by, bz, bx, by, bz);
            cc = DOT3(cx, cy, cz, cx, cy, cz);
         }

         /* uu, vv are squared norms of the two plane normals expressed
            via dot products; co = cos(phi). */
         uu = (aa * bb) - (ab * ab);
         vv = (bb * cc) - (bc * bc);
         uv = (ab * bc) - (ac * bb);
         den = 1.0 / sqrt(uu * vv);
         co = uv * den;
         co1 = 0.5 * co * den;

         /* Intermediate terms of d(cos phi)/dx for the four atoms. */
         a0x = -bc * bx + bb * cx;
         a0y = -bc * by + bb * cy;
         a0z = -bc * bz + bb * cz;
         b0x = ab * cx + bc * ax - 2. * ac * bx;
         b0y = ab * cy + bc * ay - 2. * ac * by;
         b0z = ab * cz + bc * az - 2. * ac * bz;
         c0x = ab * bx - bb * ax;
         c0y = ab * by - bb * ay;
         c0z = ab * bz - bb * az;
         a1x = 2. * uu * (-cc * bx + bc * cx);
         a1y = 2. * uu * (-cc * by + bc * cy);
         a1z = 2. * uu * (-cc * bz + bc * cz);
         b1x = 2. * uu * (bb * cx - bc * bx);
         b1y = 2. * uu * (bb * cy - bc * by);
         b1z = 2. * uu * (bb * cz - bc * bz);
         a2x = -2. * vv * (bb * ax - ab * bx);
         a2y = -2. * vv * (bb * ay - ab * by);
         a2z = -2. * vv * (bb * az - ab * bz);
         b2x = 2. * vv * (aa * bx - ab * ax);
         b2y = 2. * vv * (aa * by - ab * ay);
         b2z = 2. * vv * (aa * bz - ab * az);
         dd1x = (a0x - a2x * co1) * den;
         dd1y = (a0y - a2y * co1) * den;
         dd1z = (a0z - a2z * co1) * den;
         dd2x = (-a0x - b0x - (a1x - a2x - b2x) * co1) * den;
         dd2y = (-a0y - b0y - (a1y - a2y - b2y) * co1) * den;
         dd2z = (-a0z - b0z - (a1z - a2z - b2z) * co1) * den;
         dd3x = (b0x - c0x - (-a1x - b1x + b2x) * co1) * den;
         dd3y = (b0y - c0y - (-a1y - b1y + b2y) * co1) * den;
         dd3z = (b0z - c0z - (-a1z - b1z + b2z) * co1) * den;
         dd4x = (c0x - b1x * co1) * den;
         dd4y = (c0y - b1y * co1) * den;
         dd4z = (c0z - b1z * co1) * den;
         if (dim == 4) {
            a0w = -bc * bw + bb * cw;
            b0w = ab * cw + bc * aw - 2. * ac * bw;
            c0w = ab * bw - bb * aw;
            a1w = 2. * uu * (-cc * bw + bc * cw);
            b1w = 2. * uu * (bb * cw - bc * bw);
            a2w = -2. * vv * (bb * aw - ab * bw);
            b2w = 2. * vv * (aa * bw - ab * aw);
            dd1w = (a0w - a2w * co1) * den;
            dd2w = (-a0w - b0w - (a1w - a2w - b2w) * co1) * den;
            dd3w = (b0w - c0w - (-a1w - b1w + b2w) * co1) * den;
            dd4w = (c0w - b1w * co1) * den;
         }

         if (prm->Nhparm && a3[i] < 0) {

            /* here we will use a quadratic form for the improper torsion */
            /* we are using the NHPARM variable in prmtop to trigger this */

            /* WARNING: phi itself is here calculated from the first
               three coords -- may fail! */
            /* Note: The following improper torsion code does not support
               4D! */

            /* Clamp cos(phi) against rounding so acos() stays in domain. */
            co = co > 1.0 ? 1.0 : co;
            co = co < -1.0 ? -1.0 : co;
            phi = acos(co);

            /*
             * now calculate sin(phi) because cos(phi) is symmetric, so
             * we can decide between +-phi.
             */
            ux = ay * bz - az * by;
            uy = az * bx - ax * bz;
            uz = ax * by - ay * bx;
            vx = by * cz - bz * cy;
            vy = bz * cx - bx * cz;
            vz = bx * cy - by * cx;
            dx1 = uy * vz - uz * vy;
            dy1 = uz * vx - ux * vz;
            dz1 = ux * vy - uy * vx;
            dx1 = DOT3(dx1, dy1, dz1, bx, by, bz);
            if (dx1 < 0.0)
               phi = -phi;

            delta = phi - Phase[atyp];
            delta = delta > pi ? pi : delta;
            delta = delta < -pi ? -pi : delta;
            df = Pk[atyp] * delta;
            e = df * delta;
            e_tors += e;
            yy = sin(phi);

            /*
             * Decide what expansion to use.  Check first for the
             * "normal" expression, since it will be the most used; the
             * 0.001 value could be lowered for increased precision.
             * This insures ~1e-05% error for sin(phi)=0.001.
             */
            if (fabs(yy) > 0.001) {
               df = -2.0 * df / yy;
            } else {
               if (fabs(delta) < 0.10) {
                  /* Small-angle series expansions around phase 0 or pi. */
                  if (Phase[atyp] == 0.0) {
                     df = -2.0 * Pk[atyp] * (1 + phi * phi / 6.0);
                  } else {
                     if (fabs(Phase[atyp]) == pi) {
                        df = 2.0 * Pk[atyp] * (1 + delta * delta / 6.0);
                     }
                  }
               } else {
                  /* Far from the phase with sin(phi) ~ 0: scale hard to
                     push the geometry off the singularity. */
                  if ((phi > 0.0 && phi < (pi / 2.0))
                      || (phi < 0.0 && phi > -pi / 2.0))
                     df = df * 1000.;
                  else
                     df = -df * 1000.;
               }
            }
         } else {

            /* Proper torsion: E = Pk * (1 + cos(n*phi - phase)), expanded
               as a polynomial in cos(phi) per periodicity n.  df holds
               dE/d(cos phi). */

          multi_term:
            /* phase ~ pi means cos(phase) = -1; otherwise +1. */
            if (fabs(Phase[atyp] - 3.142) < 0.01)
               phase = -1.0;
            else
               phase = 1.0;
            ktors = Pk[atyp];
            switch ((int) fabs(Pn[atyp])) {
            case 1:
               e = ktors * (1.0 + phase * co);
               df = phase * ktors;
               break;
            case 2:
               e = ktors * (1.0 + phase * (2. * co * co - 1.));
               df = phase * ktors * 4. * co;
               break;
            case 3:
               cosq = co * co;
               e = ktors * (1.0 + phase * co * (4. * cosq - 3.));
               df = phase * ktors * (12. * cosq - 3.);
               break;
            case 4:
               cosq = co * co;
               e = ktors * (1.0 + phase * (8. * cosq * (cosq - 1.) + 1.));
               df = phase * ktors * co * (32. * cosq - 16.);
               break;
            case 6:
               cosq = co * co;
               e = ktors * (1.0 +
                            phase * (32. * cosq * cosq * cosq -
                                     48. * cosq * cosq + 18. * cosq - 1.));
               df = phase * ktors * co * (192. * cosq * cosq - 192. * cosq + 36.);
               break;
            default:
               fprintf(stderr, "bad value for Pn: %d %d %d %d %8.3f\n",
                       at1, at2, at3, at4, Pn[atyp]);
               exit(1);
            }
            e_tors += e;
         }

         /* Accumulate the gradient contributions for the four atoms. */
         f[foff + at1 + 0] += df * dd1x;
         f[foff + at1 + 1] += df * dd1y;
         f[foff + at1 + 2] += df * dd1z;
         f[foff + at2 + 0] += df * dd2x;
         f[foff + at2 + 1] += df * dd2y;
         f[foff + at2 + 2] += df * dd2z;
         f[foff + at3 + 0] += df * dd3x;
         f[foff + at3 + 1] += df * dd3y;
         f[foff + at3 + 2] += df * dd3z;
         f[foff + at4 + 0] += df * dd4x;
         f[foff + at4 + 1] += df * dd4y;
         f[foff + at4 + 2] += df * dd4z;
         if (dim == 4) {
            f[foff + at1 + 3] += df * dd1w;
            f[foff + at2 + 3] += df * dd2w;
            f[foff + at3 + 3] += df * dd3w;
            f[foff + at4 + 3] += df * dd4w;
         }

#ifdef PRINT_EPHI
         fprintf(nabout, "%4d%4d%4d%4d%4d%8.3f\n", i + 1, at1, at2, at3, at4, e);
         fprintf(nabout,
                 "%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f\n",
                 -df * dd1x, -df * dd1y, -df * dd1z, -df * dd2x,
                 -df * dd2y, -df * dd2z, -df * dd3x, -df * dd3y);
         fprintf(nabout, "%10.5f%10.5f%10.5f%10.5f\n",
                 -df * dd3z, -df * dd4x, -df * dd4y, -df * dd4z);
#endif

         /* A negative periodicity chains to the next type entry:
            evaluate the additional term of a multi-term torsion. */
         if (Pn[atyp] < 0.0) {
            atyp++;
            goto multi_term;
         }
      }
   }
   return (e_tors);
}

/***********************************************************************
                            NBOND()
************************************************************************/

/*
 * Calculate the non-bonded energy and first derivatives.
 * This function is complicated by the fact that it must
 * process two forms of pair lists: the 1-4 pair list and
 * the non-bonded pair list.  The non-bonded pair list
 * must be modified by the excluded atom list whereas the
 * 1-4 pair list is used unmodified.  Also, the non-bonded
 * pair list comprises lower and upper triangles whereas
 * the 1-4 pair list comprises an upper triangle only.
* * Calling parameters are as follows: * * lpears - the number of pairs on the lower triangle pair list * upears - the number of pairs on the upper trianble pair list * pearlist - either the 1-4 pair list or the non-bonded pair list * N14 - set to 0 for the non-bonded pair list, 1 for the 1-4 pair list * x - the atomic coordinate array * f - the gradient vector * enb - Van der Waals energy return value, passed by reference * eel - Coulombic energy return value, passed by reference * enbfac - scale factor for Van der Waals energy * eelfac - scale factor for Coulombic energy */ static int nbond(int *lpears, int *upears, int **pearlist, int N14, REAL_T * x, REAL_T * f, REAL_T * enb, REAL_T * eel, REAL_T enbfac, REAL_T eelfac) { int i, j, i34, j34, k, ic, npr, lpair, iaci, foff, threadnum, numthreads; int *iexw; REAL_T dumx, dumy, dumz, dumw = 0.0, cgi, r2inv, df2, r6, r10, f1, f2; REAL_T dedx, dedy, dedz, dedw, df, enbfaci, eelfaci, evdw, elec; REAL_T xi, yi, zi, wi = 0.0, xj, yj, zj, wj, xij, yij, zij, wij, r, r2; REAL_T dis, kij, d0, diff, rinv, rs, rssq, eps1, epsi, cgijr, pow; int ibig, isml; #define SIG 0.3 #define DIW 78.0 #define C1 38.5 evdw = 0.; elec = 0.; enbfaci = 1. / enbfac; eelfaci = 1. / eelfac; /* * If NOREDUCE or NOPAR is defined, do not execute in parallel * under OpenMP for the 1-4 nonbonded list. When NOREDUCE is * defined, all OpenMP tasks share one copy of the f array, * and because the 1-4 pair list is in upper triangular form * only, it is not possible to avoid race conditions when * updating the f array, so execution is single-threaded. * When NOPAR is defined, execution is single-threaded as well. 
*/ #if !defined(SPEC) || defined(OPENMP) #if defined(NOREDUCE) || defined(NOPAR) #pragma omp parallel if (N14 == 0) \ reduction (+: evdw, elec) \ private (i, j, iexw, npr, iaci, \ xi, yi, zi, wi, xij, yij, zij, wij, dumx, dumy, dumz, dumw, \ cgi, k, r2, r2inv, r, rinv, rs, rssq, pow, \ eps1, epsi, cgijr, df2, ic, r6, f2, f1, df, dis, d0, kij, \ diff, ibig, isml, dedx, dedy, dedz, dedw, r10, \ threadnum, numthreads, foff, lpair, i34, j34, xj, yj, zj, wj) #else #pragma omp parallel \ reduction (+: evdw, elec) \ private (i, j, iexw, npr, iaci, \ xi, yi, zi, wi, xij, yij, zij, wij, dumx, dumy, dumz, dumw, \ cgi, k, r2, r2inv, r, rinv, rs, rssq, pow, \ eps1, epsi, cgijr, df2, ic, r6, f2, f1, df, dis, d0, kij, \ diff, ibig, isml, dedx, dedy, dedz, dedw, r10, \ threadnum, numthreads, foff, lpair, i34, j34, xj, yj, zj, wj) #endif #endif /* !SPEC || OPENMP */ { /* * For execution under OpenMP when NOREDUCE is not defined, * multi-threaded execution is possible because each thread * accesses its own region of the f array. Hence, compute * an offset into the f array, and use the OpenMP thread number * and number of threads information. * * For execution under OpenMP when NOREDUCE is defined, * multi-threaded execution is not possible because all threads * share the same region of the f array and it is not possible * to parallelize and avoid race conditions. Hence, do not * compute an offset into the f array, and get the thread number * and number of threads from the mytaskid and numtasks variables * that have been set to 0 and 1, respectively in the sff.c file. * Note that this case is identical to single-threaded execution. * * For execution under ScaLAPACK or MPI, each process has its * own copy of the f array. Hence, do not compute an offset into * the f array, and get the thread number and number of threads * that have been stored in the mytaskid and numtasks variables * that have been set by the mpiinit() function of the sff.c file. 
* * The thread number and number of threads are used to determine * the specific iterations of the below for loop that will be * executed by specific OpenMP threads or MPI processes. */ #if defined(OPENMP) && !defined(NOREDUCE) threadnum = omp_get_thread_num(); numthreads = omp_get_num_threads(); foff = dim * prm->Natom * threadnum; #else threadnum = mytaskid; numthreads = numtasks; foff = 0; #endif /* * Allocate and initialize the iexw array used for skipping excluded * atoms. Note that because of the manner in which iexw is used, it * is necessary to initialize it before only the first iteration of * the following loop. */ iexw = ivector(-1, prm->Natom); for (i = -1; i < prm->Natom; i++) { iexw[i] = -1; } /* * Loop over all atoms i except for the final atom. * * If OPENMP and NOREDUCE are defined, this (i,j) loop nest will * update f[i34 + 0..3] only, except when L14 != 0. * * For MPI or ScaLAPACK, explicitly assign tasks to loop indices * for the following loop in a manner equivalent to (static, N) * scheduling for OpenMP. For OpenMP use (dynamic, N) scheduling. * * Synchronization of OpenMP threads will occur following this loop * because the parallel region ends after this loop. Following * synchronization, a reduction of the sumdeijda array will be * performed. * * Synchronization of MPI tasks will occur via the MPI_Allreduce * function that is called from within mme34. */ #if !defined(SPEC) || defined(OPENMP) #pragma omp for schedule(dynamic, blocksize) #endif for (i = 0; i < prm->Natom - 1; i++) { #if defined(MPI) || defined(SCALAPACK) if (!myroc(i, blocksize, numthreads, threadnum)) continue; #endif /* Check whether there are any atoms j on the pair list of atom i. 
*/ npr = upears[i]; if (npr <= 0) continue; iaci = prm->Ntypes * (prm->Iac[i] - 1); cgi = eelfaci * prm->Charges[i]; dumx = dumy = dumz = 0.0; i34 = dim * i; xi = x[i34 + 0]; yi = x[i34 + 1]; zi = x[i34 + 2]; if (dim == 4) { dumw = 0.0; wi = x[i34 + 3]; } /* * Expand the excluded list into the iexw array by storing i * at array address j. */ for (j = 0; j < prm->Iblo[i]; j++) { iexw[IexclAt[i][j] - 1] = i; } /* * If the 'N14' calling parameter is clear, use the beginning * address of the upper triangle pair list, which happens * to be the number of atoms on the lower triangle pair list. * If the 'N14' calling parameter is set, the beginning * address is zero because no lower triangle pair list is * used for the N14 interactions. */ if (N14 == 0) { lpair = lpears[i]; } else { lpair = 0; } /* Select atom j from the pair list. Non-graceful error handling. */ for (k = 0; k < npr; k++) { if (pearlist[i] == NULL) { fprintf(nabout, "NULL pair list entry in nbond loop 1, taskid = %d\n", mytaskid); fflush(nabout); } j = pearlist[i][lpair + k]; j34 = dim * j; /* * If the 'N14' calling parameter is clear, check whether * this i,j pair is exempted by the excluded atom list. */ if (N14 != 0 || iexw[j] != i) { xij = xi - x[j34 + 0]; yij = yi - x[j34 + 1]; zij = zi - x[j34 + 2]; r2 = xij * xij + yij * yij + zij * zij; if (dim == 4) { wij = wi - x[j34 + 3]; r2 += wij * wij; } r2inv = 1.0 / r2; r = sqrt(r2); rinv = r * r2inv; /* Calculate the energy and derivatives according to dield. 
*/ if (dield == -3) { /* special code Ramstein & Lavery dielectric, 94 force field */ rs = SIG * r; rssq = rs * rs; pow = exp(-rs); eps1 = rssq + rs + rs + 2.0; epsi = 1.0 / (DIW - C1 * pow * eps1); cgijr = cgi * prm->Charges[j] * rinv * epsi; elec += cgijr; df2 = -cgijr * (1.0 + C1 * pow * rs * rssq * epsi); ic = prm->Cno[iaci + prm->Iac[j] - 1] - 1; if (ic >= 0) { r6 = r2inv * r2inv * r2inv; f2 = prm->Cn2[ic] * r6; f1 = prm->Cn1[ic] * r6 * r6; evdw += (f1 - f2) * enbfaci; df = (df2 + (6.0 * f2 - 12.0 * f1) * enbfaci) * rinv; } else { df = df2 * rinv; } } else if (dield == -4) { /* distance-dependent dielectric code, 94 ff */ /* epsilon = r */ rs = cgi * prm->Charges[j] * r2inv; df2 = -2.0 * rs; elec += rs; ic = prm->Cno[iaci + prm->Iac[j] - 1] - 1; if (ic >= 0) { r6 = r2inv * r2inv * r2inv; f2 = prm->Cn2[ic] * r6; f1 = prm->Cn1[ic] * r6 * r6; evdw += (f1 - f2) * enbfaci; df = (df2 + (6.0 * f2 - 12.0 * f1) * enbfaci) * rinv; } else { df = df2 * rinv; } } else if (dield == -5) { /* non-bonded term from yammp */ dis = r; ic = prm->Cno[iaci + prm->Iac[j] - 1] - 1; d0 = prm->Cn2[ic]; if (dis < d0) { kij = prm->Cn1[ic]; diff = dis - d0; evdw += kij * diff * diff; df = 2.0 * kij * diff; } else { df = 0.0; } } else { /* * Code for various dielectric models. * The df2 variable should hold r(dV/dr). */ if (dield == 0) { /* epsilon = r */ rs = cgi * prm->Charges[j] * r2inv; df2 = -2.0 * rs; elec += rs; } else if (dield == 1) { /* epsilon = 1 */ rs = cgi * prm->Charges[j] * rinv; df2 = -rs; elec += rs; } else if (dield == -2) { /* Ramstein & Lavery dielectric, PNAS 85, 7231 (1988). */ rs = SIG * r; rssq = rs * rs; pow = exp(-rs); eps1 = rssq + rs + rs + 2.0; epsi = 1.0 / (DIW - C1 * pow * eps1); cgijr = cgi * prm->Charges[j] * rinv * epsi; elec += cgijr; df2 = -cgijr * (1.0 + C1 * pow * rs * rssq * epsi); } /* Calculate either Van der Waals or hydrogen bonded term. 
*/ ic = prm->Cno[iaci + prm->Iac[j] - 1]; if (ic > 0 || enbfac != 1.0) { if (ic > 0) { ic--; } else { ibig = prm->Iac[i] > prm->Iac[j] ? prm->Iac[i] : prm->Iac[j]; isml = prm->Iac[i] > prm->Iac[j] ? prm->Iac[j] : prm->Iac[i]; ic = ibig * (ibig - 1) / 2 + isml - 1; } r6 = r2inv * r2inv * r2inv; f2 = prm->Cn2[ic] * r6; f1 = prm->Cn1[ic] * r6 * r6; evdw += (f1 - f2) * enbfaci; df = (df2 + (6.0 * f2 - 12.0 * f1) * enbfaci) * rinv; #if 0 if (enbfac != 1.0) nb14 += (f1 - f2) * enbfaci; #endif } else { ic = -ic - 1; r10 = r2inv * r2inv * r2inv * r2inv * r2inv; f2 = prm->HB10[ic] * r10; f1 = prm->HB12[ic] * r10 * r2inv; evdw += (f1 - f2) * enbfaci; df = (df2 + (10.0 * f2 - 12.0 * f1) * enbfaci) * rinv; #if 0 hbener += (f1 - f2) * enbfaci; #endif } } /* * The df term contains one more factor of Dij in the denominator * so that terms such as dedx do not need to include 1/Dij. * * Update the gradient for atom j. */ df *= rinv; dedx = df * xij; dedy = df * yij; dedz = df * zij; dumx += dedx; dumy += dedy; dumz += dedz; /* * Update the gradient for the 1-4 pair list or * if either OPENMP or NOREDUCE is not defined. */ if (N14 != 0) { f[foff + j34 + 0] -= dedx; f[foff + j34 + 1] -= dedy; f[foff + j34 + 2] -= dedz; } else { #if !defined(OPENMP) || !defined(NOREDUCE) f[foff + j34 + 0] -= dedx; f[foff + j34 + 1] -= dedy; f[foff + j34 + 2] -= dedz; #endif } if (dim == 4) { dedw = df * wij; dumw += dedw; if (N14 != 0) { f[foff + j34 + 3] -= dedw; } else { #if !defined(OPENMP) || !defined(NOREDUCE) f[foff + j34 + 3] -= dedw; #endif } } } } /* For atom i, the gradient is updated in the i-loop only. */ f[foff + i34 + 0] += dumx; f[foff + i34 + 1] += dumy; f[foff + i34 + 2] += dumz; if (dim == 4) { f[foff + i34 + 3] += dumw; } } /* * If OPENMP and NOREDUCE are defined and N14 == 0, execute * a (j,i) loop nest to update f[j34 + 0..3]. 
* * Because the (j,i) loop nest uses the same thread-to-index * mapping in the outer loop as does the prior (i,j) loop nest, * no thread synchronization is required between the two nests. */ #if defined(OPENMP) && defined(NOREDUCE) if (N14 == 0) { /* * Initialize the iexw array used for skipping excluded atoms. * Note that because of the manner in which iexw is used, it is * necessary to initialize it before only the first iteration of * the following loop. */ for (i = -1; i < prm->Natom; i++) { iexw[i] = -1; } /* * Loop over all atoms j except for the first atom. * * Because OPENMP and NOREDUCE are defined, this (j,i) loop nest will * update f[j34 + 0..3]. * * For MPI or ScaLAPACK, explicitly assign tasks to loop indices * for the following loop in a manner equivalent to (static, N) * scheduling for OpenMP. For OpenMP use (dynamic, N) scheduling. * * Synchronization of OpenMP threads will occur following this loop * because the parallel region ends after this loop. Following * synchronization, a reduction of the sumdeijda array will be * performed. * * Synchronization of MPI tasks will occur via the MPI_Allreduce * function that is called from within mme34. */ #if !defined(SPEC) || defined(OPENMP) #pragma omp for schedule(dynamic, blocksize) #endif for (j = 1; j < prm->Natom; j++) { /* Check whether there are any atoms i on the pair list of atom j. */ npr = lpears[j]; if (npr <= 0) continue; dumx = dumy = dumz = 0.0; j34 = dim * j; xj = x[j34 + 0]; yj = x[j34 + 1]; zj = x[j34 + 2]; if (dim == 4) { dumw = 0.0; wj = x[j34 + 3]; } /* * Expand the excluded list into the iexw array by storing j * at array address i. */ for (i = 0; i < Jblo[j]; i++) { iexw[JexclAt[j][i] - 1] = j; } /* Select atom i from the pair list. Non-graceful error handling. 
*/ for (k = 0; k < npr; k++) { if (pearlist[j] == NULL) { fprintf(nabout, "NULL pair list entry in nbond loop 2, taskid = %d\n", mytaskid); fflush(nabout); } i = pearlist[j][k]; i34 = dim * i; iaci = prm->Ntypes * (prm->Iac[i] - 1); cgi = eelfaci * prm->Charges[i]; /* * Check whether this i,j pair is exempted * by the excluded atom list. */ if (iexw[i] != j) { xij = x[i34 + 0] - xj; yij = x[i34 + 1] - yj; zij = x[i34 + 2] - zj; r2 = xij * xij + yij * yij + zij * zij; if (dim == 4) { wij = x[i34 + 3] - wj; r2 += wij * wij; } r2inv = 1.0 / r2; r = sqrt(r2); rinv = r * r2inv; /* Calculate the derivatives according to dield. */ if (dield == -3) { /* special code Ramstein & Lavery dielectric, 94 force field */ rs = SIG * r; rssq = rs * rs; pow = exp(-rs); eps1 = rssq + rs + rs + 2.0; epsi = 1.0 / (DIW - C1 * pow * eps1); cgijr = cgi * prm->Charges[j] * rinv * epsi; df2 = -cgijr * (1.0 + C1 * pow * rs * rssq * epsi); ic = prm->Cno[iaci + prm->Iac[j] - 1] - 1; if (ic >= 0) { r6 = r2inv * r2inv * r2inv; f2 = prm->Cn2[ic] * r6; f1 = prm->Cn1[ic] * r6 * r6; df = (df2 + (6.0 * f2 - 12.0 * f1) * enbfaci) * rinv; } else { df = df2 * rinv; } } else if (dield == -4) { /* distance-dependent dielectric code, 94 ff */ /* epsilon = r */ rs = cgi * prm->Charges[j] * r2inv; df2 = -2.0 * rs; ic = prm->Cno[iaci + prm->Iac[j] - 1] - 1; if (ic >= 0) { r6 = r2inv * r2inv * r2inv; f2 = prm->Cn2[ic] * r6; f1 = prm->Cn1[ic] * r6 * r6; df = (df2 + (6.0 * f2 - 12.0 * f1) * enbfaci) * rinv; } else { df = df2 * rinv; } } else if (dield == -5) { /* non-bonded term from yammp */ dis = r; ic = prm->Cno[iaci + prm->Iac[j] - 1] - 1; d0 = prm->Cn2[ic]; if (dis < d0) { kij = prm->Cn1[ic]; diff = dis - d0; df = 2.0 * kij * diff; } else { df = 0.0; } } else { /* * Code for various dielectric models. * The df2 variable should hold r(dV/dr). 
*/ if (dield == 0) { /* epsilon = r */ rs = cgi * prm->Charges[j] * r2inv; df2 = -2.0 * rs; } else if (dield == 1) { /* epsilon = 1 */ rs = cgi * prm->Charges[j] * rinv; df2 = -rs; } else if (dield == -2) { /* Ramstein & Lavery dielectric, PNAS 85, 7231 (1988). */ rs = SIG * r; rssq = rs * rs; pow = exp(-rs); eps1 = rssq + rs + rs + 2.0; epsi = 1.0 / (DIW - C1 * pow * eps1); cgijr = cgi * prm->Charges[j] * rinv * epsi; df2 = -cgijr * (1.0 + C1 * pow * rs * rssq * epsi); } /* Calculate either Van der Waals or hydrogen bonded term. */ ic = prm->Cno[iaci + prm->Iac[j] - 1]; if (ic > 0 || enbfac != 1.0) { if (ic > 0) { ic--; } else { ibig = prm->Iac[i] > prm->Iac[j] ? prm->Iac[i] : prm->Iac[j]; isml = prm->Iac[i] > prm->Iac[j] ? prm->Iac[j] : prm->Iac[i]; ic = ibig * (ibig - 1) / 2 + isml - 1; } r6 = r2inv * r2inv * r2inv; f2 = prm->Cn2[ic] * r6; f1 = prm->Cn1[ic] * r6 * r6; df = (df2 + (6.0 * f2 - 12.0 * f1) * enbfaci) * rinv; #if 0 if (enbfac != 1.0) nb14 += (f1 - f2) * enbfaci; #endif } else { ic = -ic - 1; r10 = r2inv * r2inv * r2inv * r2inv * r2inv; f2 = prm->HB10[ic] * r10; f1 = prm->HB12[ic] * r10 * r2inv; df = (df2 + (10.0 * f2 - 12.0 * f1) * enbfaci) * rinv; #if 0 hbener += (f1 - f2) * enbfaci; #endif } } /* * The df term contains one more factor of Dij in the denominator * so that terms such as dedx do not need to include 1/Dij. * * Update the derivative accumulators for atom j. */ df *= rinv; dedx = df * xij; dedy = df * yij; dedz = df * zij; dumx += dedx; dumy += dedy; dumz += dedz; if (dim == 4) { dedw = df * wij; dumw += dedw; } } } /* For atom j, the gradient is updated in the j-loop only. */ f[j34 + 0] -= dumx; f[j34 + 1] -= dumy; f[j34 + 2] -= dumz; if (dim == 4) { f[j34 + 3] -= dumw; } } } #endif /* Deallocate the iexw array within this potentially parallel region. */ free_ivector(iexw, -1, prm->Natom); } /* Return evdw and elec through by-reference calling parameters. 
*/ *enb = evdw; *eel = elec; return (0); } /*********************************************************************** EGB() ************************************************************************/ /* * Calculate the generalized Born energy and first derivatives. * * Calling parameters are as follows: * * lpears - number of pairs on the non-bonded lower triangle pair list * upears - number of pairs on the non-bonded upper trianble pair list * pearlist - non-bonded pair list, contiguous for upper & lower triangles * lpearsnp - number of pairs on the non-polar lower triangle pair list * upearsnp - number of pairs on the non-polar upper trianble pair list * pearlistnp - non-polar pair list, contiguous for upper & lower triangles * x - input: the atomic (x,y,z) coordinates * f - updated: the gradient vector * fs - input: overlap parameters * rborn - input: atomic radii * q - input: atomic charges * kappa - input: inverse of the Debye-Huckel length * diel_ext - input: solvent dielectric constant * enb - updated: Lennard-Jones energy * eelt - updated: gas-phase electrostatic energy * esurf - updated: nonpolar surface area solvation free energy * enp - updated: nonpolar van der Waals solvation free energy * freevectors - if !=0 free the static vectors and return */ static REAL_T egb(INT_T * lpears, INT_T * upears, INT_T ** pearlist, INT_T * lpearsnp, INT_T * upearsnp, INT_T ** pearlistnp, REAL_T * x, REAL_T * f, REAL_T * fs, REAL_T * rborn, REAL_T * q, REAL_T * kappa, REAL_T * diel_ext, REAL_T * enb, REAL_T * eelt, REAL_T * esurf, REAL_T * enp, INT_T freevectors) #define BOFFSET (0.09) #define KSCALE (0.73) { #if defined(MPI) || defined(SCALAPACK) int ierror; static REAL_T *reductarr = NULL; #endif static REAL_T *reff = NULL, *sumdeijda = NULL, *psi = NULL; static int *reqack = NULL, *iexw = NULL; char atsymb; int i, i34, j, j34, k, threadnum, numthreads, maxthreads, eoff, foff, soff; int npairs, ic, iaci, iteration, mask, consumer, producer, numcopies; size_t natom; REAL_T 
epol, dielfac, qi, qj, qiqj, fgbi, fgbk, rb2, expmkf; REAL_T elec, evdw, sumda, daix, daiy, daiz, daiw; REAL_T xi, yi, zi, wi = 0.0, xj, yj, zj, wj = 0.0, xij, yij, zij, wij; REAL_T dedx, dedy, dedz, dedw, de; REAL_T dij1i, dij3i, temp1; REAL_T qi2h, qid2h, datmp; REAL_T theta, ri1i, dij2i; REAL_T dij, sumi, t1, t2; REAL_T eel, f6, f12, rinv, r2inv, r6inv; REAL_T r2, ri, rj, sj, sj2, thi; REAL_T uij, efac, temp4, temp5, temp6; REAL_T dumbo, tmpsd; REAL_T rgbmax1i, rgbmax2i, rgbmaxpsmax2; /* LCPO stuff follows */ int count, count2, icount; REAL_T si, sumAij, sumAjk, sumAijAjk, sumdAijddijdxi; REAL_T sumdAijddijdyi, sumdAijddijdzi, sumdAijddijdxiAjk; REAL_T sumdAijddijdyiAjk, sumdAijddijdziAjk, rij, tmpaij, Aij, dAijddij; REAL_T dAijddijdxj, dAijddijdyj, dAijddijdzj; REAL_T sumdAjkddjkdxj, sumdAjkddjkdyj, sumdAjkddjkdzj, p3p4Aij; REAL_T xk, yk, zk, rjk2, djk1i, rjk, vdw2dif, tmpajk, Ajk, sumAjk2, dAjkddjk; REAL_T dAjkddjkdxj, dAjkddjkdyj, dAjkddjkdzj, lastxj, lastyj, lastzj; REAL_T dAidxj, dAidyj, dAidzj, Ai, dAidxi, dAidyi, dAidzi; REAL_T totsasa; /* AGBNP stuff follows */ int maxdepth, maxmaxdepth, fpair, sender, minusone = -1; int *setarray; REAL_T totalvolume, totvolume, surfacearea, surfarea, radius; REAL_T evdwnp, vdwdenom, vdwterm; #if defined(MPI) || defined(SCALAPACK) MPI_Status status; #endif /*FGB taylor coefficients follow */ /* from A to H : */ /* 1/3 , 2/5 , 3/7 , 4/9 , 5/11 */ /* 4/3 , 12/5 , 24/7 , 40/9 , 60/11 */ #define TA 0.33333333333333333333 #define TB 0.4 #define TC 0.42857142857142857143 #define TD 0.44444444444444444444 #define TDD 0.45454545454545454545 #define TE 1.33333333333333333333 #define TF 2.4 #define TG 3.42857142857142857143 #define TH 4.44444444444444444444 #define THH 5.45454545454545454545 /* * Determine the size of the iexw array. If OPENMP is * defined, a copy of this array must be allocated for * each thread; otherwise, only one copy is allocated. 
*/ #ifdef OPENMP maxthreads = omp_get_max_threads(); #else maxthreads = 1; #endif /* * Determine the size of the sumdeijda array. If OPENMP is * defined and NOREDUCE is not defined, a copy of this array * must be allocated for each thread; otherwise, only one copy * is allocated. */ #ifndef NOREDUCE numcopies = maxthreads; #else numcopies = 1; #endif natom = (size_t) prm->Natom; /* * If freevectors != 0, deallocate the static arrays that have been * previously allocated and return. */ if (freevectors != 0) { if (reff != NULL) { free_vector(reff, 0, natom); reff = NULL; } if (iexw != NULL) { free_ivector(iexw, -1, maxthreads*(natom+1)); iexw = NULL; } if (sumdeijda != NULL) { free_vector(sumdeijda, 0, numcopies*natom); sumdeijda = NULL; } if (psi != NULL) { free_vector(psi, 0, natom); psi = NULL; } if (reqack != NULL) { free_ivector(reqack, 0, maxthreads); reqack = NULL; } #if defined(MPI) || defined(SCALAPACK) if (reductarr != NULL) { free_vector(reductarr, 0, natom); reductarr = NULL; } #endif return (0.0); } /* * Smooth "cut-off" in calculating GB effective radii. * Implementd by Andreas Svrcek-Seiler and Alexey Onufriev. * The integration over solute is performed up to rgbmax and includes * parts of spheres; that is an atom is not just "in" or "out", as * with standard non-bonded cut. As a result, calclated effective * radii are less than rgbmax. This saves time, and there is no * discontinuity in dReff/drij. * * Only the case rgbmax > 5*max(sij) = 5*fsmax ~ 9A is handled; this is * enforced in mdread(). Smaller values would not make much physical * sense anyway. * * Note: rgbmax must be less than or equal to cut so that the pairlist * generated from cut may be applied to calculation of the effective * radius and its derivatives. 
*/ if (rgbmax > cut) { fprintf(nabout, "Error in egb: rgbmax = %f is greater than cutoff = %f\n", rgbmax, cut); exit(1); } rgbmax1i = 1.0 / rgbmax; rgbmax2i = rgbmax1i * rgbmax1i; rgbmaxpsmax2 = (rgbmax + prm->Fsmax) * (rgbmax + prm->Fsmax); /* Allocate some static arrays if they have not been allocated already. */ if (reff == NULL) { reff = vector(0, natom); } if (iexw == NULL) { iexw = ivector(-1, maxthreads*(natom+1)); } if (sumdeijda == NULL) { sumdeijda = vector(0, numcopies*natom); } if ( (psi == NULL) && (gb==2 || gb==5) ) { psi = vector(0, natom); } if (reqack == NULL) { reqack = ivector(0, numcopies); } #if defined(MPI) || defined(SCALAPACK) if (reductarr == NULL) { reductarr = vector(0, natom); } #endif if (gb_debug) fprintf(nabout, "Effective Born radii:\n"); /* * Get the "effective" Born radii via the approximate pairwise method. * Use Eqs 9-11 of Hawkins, Cramer, Truhlar, J. Phys. Chem. 100:19824 * (1996). * * For MPI or ScaLAPACK, initialize all elements of the reff array. * Although each task will calculate only a subset of the elements, * a reduction is used to combine the results from all tasks. * If a gather were used instead of a reduction, no initialization * would be necessary. */ #if defined(MPI) || defined(SCALAPACK) for (i = 0; i < prm->Natom; i++) { reff[i] = 0.0; } #endif #if !defined(SPEC) || defined(OPENMP) #pragma omp parallel \ private (i, xi, yi, zi, wi, ri, ri1i, sumi, j, k, xij, yij, zij, wij, \ r2, dij1i, dij, sj, sj2, uij, dij2i, tmpsd, dumbo, theta, \ threadnum, numthreads) #endif { /* * Get the thread number and the number of threads for multi-threaded * execution under OpenMP. For all other cases, including ScaLAPACK, * MPI and single-threaded execution, use the values that have been * stored in mytaskid and numtasks, respectively. */ #if defined(OPENMP) threadnum = omp_get_thread_num(); numthreads = omp_get_num_threads(); #else threadnum = mytaskid; numthreads = numtasks; #endif /* * Loop over all atoms i. 
* * For MPI or ScaLAPACK, explicitly assign tasks to loop indices * for the following loop in a manner equivalent to (static, N) * scheduling for OpenMP. For OpenMP use (dynamic, N) scheduling. * * The reff array is written in the following loops. It is necessary to * synchronize the OpenMP threads or MPI tasks that execute these loops * following loop execution so that a race condition does not exist for * reading the reff array before it is written. Even if all subsequent * loops use loop index to thread or task mapping that is identical to * that of the following loop, elements of the reff array are indexed by * other loop indices, so synchronization is necessary. * * OpenMP synchronization is accomplished by the implied barrier * at the end of this 'pragma omp for'. MPI synchronization is * accomplished by MPI_Allreduce. */ #if !defined(SPEC) || defined(OPENMP) #pragma omp for schedule(dynamic, blocksize) #endif for (i = 0; i < prm->Natom; i++) { #if defined(MPI) || defined(SCALAPACK) if (!myroc(i, blocksize, numthreads, threadnum)) continue; #endif xi = x[dim * i]; yi = x[dim * i + 1]; zi = x[dim * i + 2]; if (dim == 4) { wi = x[dim * i + 3]; } ri = rborn[i] - BOFFSET; ri1i = 1. / ri; sumi = 0.0; /* Select atom j from the pair list. Non-graceful error handling. 
*/ for (k = 0; k < lpears[i] + upears[i]; k++) { if (pearlist[i] == NULL) { fprintf(nabout, "NULL pair list entry in egb loop 1, taskid = %d\n", mytaskid); fflush(nabout); } j = pearlist[i][k]; xij = xi - x[dim * j]; yij = yi - x[dim * j + 1]; zij = zi - x[dim * j + 2]; r2 = xij * xij + yij * yij + zij * zij; if (dim == 4) { wij = wi - x[dim * j + 3]; r2 += wij * wij; } if (r2 > rgbmaxpsmax2) continue; dij1i = 1.0 / sqrt(r2); dij = r2 * dij1i; sj = fs[j] * (rborn[j] - BOFFSET); sj2 = sj * sj; /* * ---following are from the Appendix of Schaefer and Froemmel, * JMB 216:1045-1066, 1990; Taylor series expansion for d>>s * is by Andreas Svrcek-Seiler; smooth rgbmax idea is from * Andreas Svrcek-Seiler and Alexey Onufriev. */ if (dij > rgbmax + sj) continue; if ((dij > rgbmax - sj)) { uij = 1. / (dij - sj); sumi -= 0.125 * dij1i * (1.0 + 2.0 * dij * uij + rgbmax2i * (r2 - 4.0 * rgbmax * dij - sj2) + 2.0 * log((dij - sj) * rgbmax1i)); } else if (dij > 4.0 * sj) { dij2i = dij1i * dij1i; tmpsd = sj2 * dij2i; dumbo = TA + tmpsd * (TB + tmpsd * (TC + tmpsd * (TD + tmpsd * TDD))); sumi -= sj * tmpsd * dij2i * dumbo; } else if (dij > ri + sj) { sumi -= 0.5 * (sj / (r2 - sj2) + 0.5 * dij1i * log((dij - sj) / (dij + sj))); } else if (dij > fabs(ri - sj)) { theta = 0.5 * ri1i * dij1i * (r2 + ri * ri - sj2); uij = 1. / (dij + sj); sumi -= 0.25 * (ri1i * (2. - theta) - uij + dij1i * log(ri * uij)); } else if (ri < sj) { sumi -= 0.5 * (sj / (r2 - sj2) + 2. * ri1i + 0.5 * dij1i * log((sj - dij) / (sj + dij))); } } if (gb == 1) { /* "standard" (HCT) effective radii: */ reff[i] = 1.0 / (ri1i + sumi); if (reff[i] < 0.0) reff[i] = 30.0; } else { /* "gbao" formulas: */ psi[i] = -ri * sumi; reff[i] = 1.0 / (ri1i - tanh((gbalpha - gbbeta * psi[i] + gbgamma * psi[i] * psi[i]) * psi[i]) / rborn[i]); } if (gb_debug) fprintf(nabout, "%d\t%15.7f\t%15.7f\n", i + 1, rborn[i], reff[i]); } } /* The MPI synchronization is accomplished via reduction of the reff array. 
*/ #if defined(MPI) || defined(SCALAPACK) t1 = seconds(); ierror = MPI_Allreduce(reff, reductarr, prm->Natom, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); if (ierror != MPI_SUCCESS) { fprintf(nabout, "Error in egb reff reduction, error = %d mytaskid = %d\n", ierror, mytaskid); } for (i = 0; i < prm->Natom; i++) { reff[i] = reductarr[i]; } /* Update the reduction time. */ t2 = seconds(); treduce += t2 - t1; t1 = t2; #endif /* Do not compute non-polar contributions for this benchmark code. */ *esurf = 0.0; /* Compute the GB, Coulomb and Lennard-Jones energies and derivatives. */ epol = elec = evdw = evdwnp = 0.0; #if !defined(SPEC) || defined(OPENMP) #pragma omp parallel reduction (+: epol, elec, evdw, evdwnp) \ private (i, i34, ri, qi, qj, expmkf, dielfac, qi2h, qid2h, iaci, \ xi, yi, zi, wi, k, j, j34, xij, yij, zij, wij, r2, qiqj, \ rj, rb2, efac, fgbi, fgbk, temp4, temp6, eel, de, temp5, \ rinv, r2inv, ic, r6inv, f6, f12, dedx, dedy, dedz, dedw, \ threadnum, numthreads, eoff, foff, soff, vdwdenom, vdwterm, \ sumda, thi, ri1i, dij1i, datmp, daix, daiy, daiz, daiw, \ dij2i, dij, sj, sj2, temp1, dij3i, tmpsd, dumbo, npairs, \ iteration, mask, consumer, producer, xj, yj, zj, wj) #endif { /* * Get the thread number and the number of threads for multi-threaded * execution under OpenMP. For all other cases, including ScaLAPACK, * MPI and single-threaded execution, use the values that have been * stored in mytaskid and numtasks, respectively. */ #if defined(OPENMP) threadnum = omp_get_thread_num(); numthreads = omp_get_num_threads(); #else threadnum = mytaskid; numthreads = numtasks; #endif /* * Compute offset into the iexw array for this thread, but only * if OPENMP is defined. Otherwise, the offset is zero. */ #ifdef OPENMP eoff = (prm->Natom + 1) * threadnum; #else eoff = 0; #endif /* * Compute offsets into the gradient and sumdeijda arrays for this * thread, but only if OPENMP is defined and NOREDUCE is not defined. * Otherwise, the offsets are zero. 
*/ #if defined(OPENMP) && !defined(NOREDUCE) soff = prm->Natom * threadnum; foff = dim * soff; #else soff = 0; foff = 0; #endif /* * Initialize the sumdeijda array inside of the parallel region. * * For MPI and ScaLAPACK, each process has its own copy of the * array which must be initialized in its entirety because a * call to MPI_Allreduce will be used to reduce the array. * The MPI reduction will synchronize the processes. * * For OpenMP, when NOREDUCE is not defined each thread has its * own copy of the array as well. But when NOREDUCE is not * defined there is only one copy of the array, and threads must * be synchronized to ensure that this copy is initialized * prior to use. */ #if !defined(OPENMP) || !defined(NOREDUCE) for (i = 0; i < prm->Natom; i++) { sumdeijda[soff + i] = 0.0; } #elif defined(OPENMP) && defined(NOREDUCE) if (threadnum == 0) { for (i = 0; i < prm->Natom; i++) { sumdeijda[soff + i] = 0.0; } } #pragma omp barrier #endif /* * Initialize the iexw array used for skipping excluded atoms. * * Note that because of the manner in which iexw is used, it * is necessary to initialize it before only the first iteration of * the following loop. */ for (i = -1; i < prm->Natom; i++) { iexw[eoff + i] = -1; } /* * Loop over all atoms i. * * If OPENMP and NOREDUCE are defined, this (i,j) loop nest will * update sumdeijda[i] and f[i34 + 0..3] only. * * Synchronization of OpenMP threads will occur following this * loop nest because of the '#pragma omp for'. * * For MPI or ScaLAPACK, explicitly assign tasks to loop indices * for the following loop in a manner equivalent to (static, N) * scheduling for OpenMP. For OpenMP use (dynamic, N) scheduling. 
*/ #if !defined(SPEC) || defined(OPENMP) #pragma omp for schedule(dynamic, blocksize) #endif for (i = 0; i < prm->Natom; i++) { #if defined(MPI) || defined(SCALAPACK) if (!myroc(i, blocksize, numthreads, threadnum)) continue; #endif ri = reff[i]; qi = q[i]; /* * If atom i is not frozen, compute the "diagonal" energy that * is a function of only the effective radius Ri but not of the * interatomic distance Dij. Compute also the contribution of * the diagonal energy term to the sum by which the derivative * of Ri will be multiplied. Do not calculate the van der Waals * component of the non-polar solvation free energy for this * benchmark version of the code. */ if (!frozen[i]) { expmkf = exp(-KSCALE * (*kappa) * ri) / (*diel_ext); dielfac = 1.0 - expmkf; qi2h = 0.5 * qi * qi; qid2h = qi2h * dielfac; epol += -qid2h / ri; vdwterm = 0.0; vdwdenom = 1.0; sumdeijda[soff + i] += qid2h - KSCALE * (*kappa) * qi2h * expmkf * ri + vdwterm; } /* * Skip the pair calculations if there are no atoms j on the * pair list of atom i. */ npairs = upears[i]; if (npairs <= 0) continue; i34 = dim * i; xi = x[i34]; yi = x[i34 + 1]; zi = x[i34 + 2]; if (dim == 4) { wi = x[i34 + 3]; } iaci = prm->Ntypes * (prm->Iac[i] - 1); /* * Expand the excluded atom list into the iexw array by storing i * at array address j. */ for (j = 0; j < prm->Iblo[i]; j++) { iexw[eoff + IexclAt[i][j] - 1] = i; } /* Initialize the derivative accumulators. */ daix = daiy = daiz = daiw = 0.0; /* Select atom j from the pair list. Non-graceful error handling. */ for (k = lpears[i]; k < lpears[i] + npairs; k++) { if (pearlist[i] == NULL) { fprintf(nabout, "NULL pair list entry in egb loop 3, taskid = %d\n", mytaskid); fflush(nabout); } j = pearlist[i][k]; j34 = dim * j; /* Continue computing the non-diagonal energy term. 
*/ xij = xi - x[j34]; yij = yi - x[j34 + 1]; zij = zi - x[j34 + 2]; r2 = xij * xij + yij * yij + zij * zij; if (dim == 4) { wij = wi - x[j34 + 3]; r2 += wij * wij; } /* * Because index j is retrieved from the pairlist array it is * not constrained to a particular range of values; therefore, * the threads that have loaded the reff array must be * synchronized prior to the use of reff below. */ qiqj = qi * q[j]; rj = reff[j]; rb2 = ri * rj; efac = exp(-r2 / (4.0 * rb2)); fgbi = 1.0 / sqrt(r2 + rb2 * efac); fgbk = -(*kappa) * KSCALE / fgbi; expmkf = exp(fgbk) / (*diel_ext); dielfac = 1.0 - expmkf; epol += -qiqj * dielfac * fgbi; temp4 = fgbi * fgbi * fgbi; temp6 = qiqj * temp4 * (dielfac + fgbk * expmkf); de = temp6 * (1.0 - 0.25 * efac); temp5 = 0.5 * efac * temp6 * (rb2 + 0.25 * r2); /* * Compute the contribution of the non-diagonal energy term to the * sum by which the derivatives of Ri and Rj will be multiplied. */ sumdeijda[soff + i] += ri * temp5; #if !defined(OPENMP) || !defined(NOREDUCE) sumdeijda[soff + j] += rj * temp5; #endif /* * Compute the Van der Waals and Coulombic energies for only * those pairs that are not on the excluded atom list. Any * pair on the excluded atom list will have atom i stored at * address j of the iexw array. It is not necessary to reset * the elements of the iexw array to -1 between successive * iterations in i because an i,j pair is uniquely identified * by atom i stored at array address j. Thus for example, the * i+1,j pair would be stored at the same address as the i,j * pair but after the i,j pair were used. * * The de term contains one more factor of Dij in the denominator * so that terms such as dedx do not need to include 1/Dij. */ if (iexw[eoff + j] != i) { rinv = 1. 
/ sqrt(r2); r2inv = rinv * rinv; /* gas-phase Coulomb energy: */ eel = qiqj * rinv; elec += eel; de -= eel * r2inv; /* Lennard-Jones energy: */ ic = prm->Cno[iaci + prm->Iac[j] - 1] - 1; if (ic >= 0) { r6inv = r2inv * r2inv * r2inv; f6 = prm->Cn2[ic] * r6inv; f12 = prm->Cn1[ic] * r6inv * r6inv; evdw += f12 - f6; de -= (12. * f12 - 6. * f6) * r2inv; } } /* * Sum to the gradient vector the derivatives of Dij that are * computed relative to the cartesian coordinates of atoms i and j. */ dedx = de * xij; dedy = de * yij; dedz = de * zij; daix += dedx; daiy += dedy; daiz += dedz; if (dim == 4) { dedw = de * wij; daiw += dedw; } /* Update the j elements of the gradient array. */ #if !defined(OPENMP) || !defined(NOREDUCE) f[foff + j34] -= dedx; f[foff + j34 + 1] -= dedy; f[foff + j34 + 2] -= dedz; if (dim == 4) { f[foff + j34 + 3] -= dedw; } #endif } /* Update the i elements of the gradient array. */ f[foff + i34] += daix; f[foff + i34 + 1] += daiy; f[foff + i34 + 2] += daiz; if (dim == 4) { f[foff + i34 + 3] += daiw; } } /* * If OPENMP and NOREDUCE are defined, execute a (j,i) loop nest * to update sumdeijda[j] and f[j34 + 0..3]. */ #if defined(OPENMP) && defined(NOREDUCE) /* * Initialize the iexw array used for skipping excluded atoms. * * Note that because of the manner in which iexw is used, it * is necessary to initialize it before only the first iteration of * the following loop. */ for (i = -1; i < prm->Natom; i++) { iexw[eoff + i] = -1; } /* * Loop over all atoms j. * * Because OPENMP and NOREDUCE are defined, this (j,i) loop nest will * update sumdeijda[j] and f[j34 + 0..3] only. * * For MPI or ScaLAPACK, explicitly assign tasks to loop indices * for the following loop in a manner equivalent to (static, N) * scheduling for OpenMP. For OpenMP use (dynamic, N) scheduling. */ #pragma omp for schedule(dynamic, blocksize) for (j = 0; j < prm->Natom; j++) { /* * Skip the pair calculations if there are no atoms i on the * pair list of atom j. 
*/ npairs = lpears[j]; if (npairs <= 0) continue; qj = q[j]; rj = reff[j]; j34 = dim * j; xj = x[j34]; yj = x[j34 + 1]; zj = x[j34 + 2]; if (dim == 4) { wj = x[j34 + 3]; } /* * Expand the excluded atom list into the iexw array by storing j * at array address i. */ for (i = 0; i < Jblo[j]; i++) { iexw[eoff + JexclAt[j][i] - 1] = j; } /* Initialize the derivative accumulators. */ daix = daiy = daiz = daiw = 0.0; /* Select atom i from the pair list. Non-graceful error handling. */ for (k = 0; k < npairs; k++) { if (pearlist[j] == NULL) { printf("NULL pair list entry in egb loop 4, taskid = %d\n", mytaskid); fflush(nabout); } i = pearlist[j][k]; i34 = dim * i; xij = x[i34] - xj; yij = x[i34 + 1] - yj; zij = x[i34 + 2] - zj; r2 = xij * xij + yij * yij + zij * zij; if (dim == 4) { wij = x[i34 + 3] - wj; r2 += wij * wij; } iaci = prm->Ntypes * (prm->Iac[i] - 1); /* * Because index i is retrieved from the pairlist array it is * not constrained to a particular range of values; therefore, * the threads that have loaded the reff array must be * synchronized prior to the use of reff below. */ qiqj = q[i] * qj; ri = reff[i]; rb2 = ri * rj; efac = exp(-r2 / (4.0 * rb2)); fgbi = 1.0 / sqrt(r2 + rb2 * efac); fgbk = -(*kappa) * KSCALE / fgbi; expmkf = exp(fgbk) / (*diel_ext); dielfac = 1.0 - expmkf; temp4 = fgbi * fgbi * fgbi; temp6 = qiqj * temp4 * (dielfac + fgbk * expmkf); de = temp6 * (1.0 - 0.25 * efac); temp5 = 0.5 * efac * temp6 * (rb2 + 0.25 * r2); /* * Compute the contribution of the non-diagonal energy term to the * sum by which the derivatives of Ri and Rj will be multiplied. */ sumdeijda[j] += rj * temp5; /* * Compute the Van der Waals and Coulombic energies for only * those pairs that are not on the excluded atom list. Any * pair on the excluded atom list will have atom j stored at * address i of the iexw array. 
It is not necessary to reset * the elements of the iexw array to -1 between successive * iterations in j because an i,j pair is uniquely identified * by atom j stored at array address i. Thus for example, the * i,j+1 pair would be stored at the same address as the i,j * pair but after the i,j pair were used. * * The de term contains one more factor of Dij in the denominator * so that terms such as dedx do not need to include 1/Dij. */ if (iexw[eoff + i] != j) { rinv = 1. / sqrt(r2); r2inv = rinv * rinv; /* gas-phase Coulomb energy: */ eel = qiqj * rinv; de -= eel * r2inv; /* Lennard-Jones energy: */ ic = prm->Cno[iaci + prm->Iac[j] - 1] - 1; if (ic >= 0) { r6inv = r2inv * r2inv * r2inv; f6 = prm->Cn2[ic] * r6inv; f12 = prm->Cn1[ic] * r6inv * r6inv; de -= (12. * f12 - 6. * f6) * r2inv; } } /* * Sum to the gradient vector the derivatives of Dij that are * computed relative to the cartesian coordinates of atoms i and j. */ dedx = de * xij; dedy = de * yij; dedz = de * zij; daix += dedx; daiy += dedy; daiz += dedz; if (dim == 4) { dedw = de * wij; daiw += dedw; } } /* Update the j elements of the gradient array. */ f[j34] -= daix; f[j34 + 1] -= daiy; f[j34 + 2] -= daiz; if (dim == 4) { f[j34 + 3] -= daiw; } } #endif } /* * If OPENMP is defined and NOREDUCE is not defined, perform * a reduction over sumdeijda either logarithmically or not. * * Note: for very large numbers of threads, the cost of reduction * may exceed the cost of separate (i, j) and (j, i) loop nests * that are used in this egb function and in the egb2 function. */ #if defined(OPENMP) && !defined(NOREDUCE) t1 = seconds(); #undef LOGARITHMIC_REDUCTION #ifdef LOGARITHMIC_REDUCTION /* * Here is the logarithmic reduction for OpenMP. * Initialize the reqack array. 
*/ for (i = 0; i < numcopies; i++) { reqack[i] = 0; } #pragma omp parallel \ private (i, iteration, mask, consumer, producer, threadnum) { /* * If EGB_OMP_FLUSH is not defined, synchronize the threads * via '#pragma omp barrier' which can be costly due to the * need to synchronize all of the threads. * * If EGB_OMP_FLUSH is defined, four-cycle signaling will be * used to synchronize the threads, as will be seen below, * but here we need a '#pragma omp flush' so that the request * and acknowledge flags are read correctly by all of the threads. */ #define EGB_OMP_FLUSH #ifndef EGB_OMP_FLUSH #pragma omp barrier #else #pragma omp flush #endif /* * Calculate the iterations for the log2 reduction of the grad array. * Note that each OpenMP thread determines 'consumer' and 'producer' * from its thread number. */ threadnum = omp_get_thread_num(); iteration = maxthreads - 1; mask = 1; while (iteration > 0) { consumer = threadnum & (~mask); producer = consumer | ((mask + 1) >> 1); /* * 'Consumer' designates a thread to which to add data from a * 'producer' thread. Perform reduction only when both consumer * and producer are less than maxthreads. * * For successive iterations of the loop mask will have the values * 1, 3, 7, 15../ * * The for the example of maxThreads=14 (numThreads=0..13), the * following threads will be chosen by consumer and producer: * * (iteration 1, consumer) - 0, 2, 4, 6, 8, 10, 12 * (iteration 1, producer) - 1, 3, 5, 7, 9, 11, 13 * * (iteration 2, consumer) - 0, 4, 8, 12 * (iteration 2, producer) - 2, 6, 10, 13 * * (iteration 3, consumer) - 0, 8 * (iteration 3, producer) - 4, 12 * * (iteration 4, consumer) - 0 * (iteration 4, producer) - 8 * * As the example shows, the final result is found in the * sumdeijda array for thread 0. * * Note that the following if statement uses maxthreads to * determine whether to perform a reduction step. 
Maxthreads * may not be used to determine whether to execute the while loop * (above) because all threads must execute the while loop in order * that the '#pragma omp barrier' within the loop not hang due to * threads that are not executing the loop. The test for * (threadNum == consumer) guarantees that this thread will accept * data from a producer thread which has (threadNum == producer). * The test for (producer < maxthreads) guarantees that the producer * thread exists. */ if ( ( threadnum == consumer ) && ( producer < maxthreads ) ) { /* * If EGB_OMP_FLUSH is defined, four-cycle signaling is used to * synchronize the threads. The 'consumer' thread raises its * request flag then waits for the 'producer' thread to raise * its acknowledge flag. * * The request flag is set by assigning the value of 'iteration' * to reqack[consumer]. The acknowledge flag is set by assigning * the value of iteration to reqack[producer]. This value * is used instead of 1 in order that each iteration of the loop * have a unique value for the request and acknowledge flags. * This approach avoids a race condition across iterations of * the loop for access to the request and acknowledge flags. * * If EGB_OMP_FLUSH is not defined then no test is necessary * because '#pragma omp barrier' is used to resynchronize * all threads following each iteration of the while loop. */ #ifdef EGB_OMP_FLUSH reqack[consumer] = iteration; #pragma omp flush do { #pragma omp flush } while (reqack[producer] != iteration); #endif /* * The producer and consumer threads are synchronized, * so add the grad array from the producer to the * grad array from the consumer. */ for (i = 0; i < prm->Natom; i++) { sumdeijda[consumer * prm->Natom + i] += sumdeijda[producer * prm->Natom + i]; } /* * If EGB_OMP_FLUSH is defined, four-cycle signaling is used to * synchronize the threads. The 'consumer' thread lowers its * request flag then waits for the 'producer' thread to lower * its acknowledge flag. 
*/ #ifdef EGB_OMP_FLUSH reqack[consumer] = 0; #pragma omp flush do { #pragma omp flush } while (reqack[producer] == iteration); #endif } /* * Here is the if statement that controls whether an OpenMP thread * is a producer. Note that because consumer never equals producer, * a thread cannot be both the consumer and producer during a given * iteration of the while loop. * * Because the grad array contents are copied by the consumer thread, * the producer thread needs only to synchronize via four-cycle * signaling. * * It is necessary not only to check that (threadnum == producer) * but also that (threadnum < maxthreads) to ensure that the * producer thread exists. */ if ( ( threadnum == producer ) && ( threadnum < maxthreads) ) { /* * If EGB_OMP_FLUSH is defined, four-cycle signaling is used to * synchronize the threads. The 'producer' thread waits for * the 'consumer' thread to raise its request flag, then raises * its acknowledge flag, then waits for the 'consumer' thread * to lower its request flag (indicating that the 'consumer' * thread has read the data), then lowers its acknowledge flag. */ #ifdef EGB_OMP_FLUSH do { #pragma omp flush } while (reqack[consumer] != iteration); reqack[producer] = iteration; #pragma omp flush do { #pragma omp flush } while (reqack[consumer] == iteration); reqack[producer] = 0; #pragma omp flush #endif } /* * If EGB_OMP_FLUSH is not defined resynchronize via * '#pragma omp barrier'. */ #ifndef EGB_OMP_FLUSH #pragma omp barrier #endif /* Prepare for the next iteration of the while loop. */ mask = (mask << 1) + 1; iteration >>= 1; } } #else /* * Here is the non-logarithmic reduction of the sumdeijda array * under OpenMP. Add to the sumdeijda array for thread 0 all * of the sumdeijda arrays for non-zero threads. The (j, i) * loop nest is more efficient than an (i, j) loop nest. * * Note: the following 'if' should not be needed, but works around a * bug in ifort 9.1 on ia64. 
*/ if (maxthreads > 1) { #pragma omp parallel for private(i, j) schedule(dynamic, blocksize) for (j = 0; j < prm->Natom; j++) { for (i = 1; i < numcopies; i++) { sumdeijda[j] += sumdeijda[prm->Natom * i + j]; } } } /* Update the reduction time. */ t2 = seconds(); treduce += t2 - t1; t1 = t2; #endif /* Perform a reduction of sumdeijda if MPI or SCALAPACK is defined. */ #elif defined(MPI) || defined(SCALAPACK) t1 = seconds(); ierror = MPI_Allreduce(sumdeijda, reductarr, prm->Natom, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); if (ierror != MPI_SUCCESS) { fprintf(nabout, "Error in egb sumdeijda reduction, error = %d mytaskid = %d\n", ierror, mytaskid); } for (i = 0; i < prm->Natom; i++) { sumdeijda[i] = reductarr[i]; } /* Update the reduction time. */ t2 = seconds(); treduce += t2 - t1; t1 = t2; #endif #if !defined(SPEC) || defined(OPENMP) #pragma omp parallel \ private (i, i34, ri, qi, expmkf, dielfac, qi2h, qid2h, iaci, \ xi, yi, zi, wi, k, j, j34, xij, yij, zij, wij, r2, qiqj, \ rj, rb2, efac, fgbi, fgbk, temp4, temp6, eel, de, temp5, \ rinv, r2inv, ic, r6inv, f6, f12, dedx, dedy, dedz, dedw, \ threadnum, numthreads, foff, xj, yj, zj, wj, \ sumda, thi, ri1i, dij1i, datmp, daix, daiy, daiz, daiw, \ dij2i, dij, sj, sj2, temp1, dij3i, tmpsd, dumbo, npairs) #endif { /* * Get the thread number and the number of threads for multi-threaded * execution under OpenMP. For all other cases, including ScaLAPACK, * MPI and single-threaded execution, use the values that have been * stored in mytaskid and numtasks, respectively. */ #if defined(OPENMP) threadnum = omp_get_thread_num(); numthreads = omp_get_num_threads(); #else threadnum = mytaskid; numthreads = numtasks; #endif /* * Compute an offset into the gradient array for this thread, * but only if OPENMP is defined and NOREDUCE is not defined. * Even if NOREDUCE is defined, there is no need to compute * an offset into the sumdeijda array because all copies of * this array have been reduced into copy zero. 
*/ #if defined(OPENMP) && !defined(NOREDUCE) foff = prm->Natom * dim * threadnum; #else foff = 0; #endif /* * Compute the derivatives of the effective radius Ri of atom i * with respect to the cartesian coordinates of each atom j. Sum * all of these derivatives into the gradient vector. * * Loop over all atoms i. * * If OPENMP and NOREDUCE are defined, this (i,j) loop nest will * update f[i34 + 0..3] only. * * Synchronization of OpenMP threads will occur following this * loop nest because of the '#pragma omp for'. * * A reduction of the gradient array will occur in the mme34 function, * either for OpenMP or MPI. This reduction will synchronize the MPI * tasks, so an explicit barrier is not necessary following this loop. * * For MPI or ScaLAPACK, explicitly assign tasks to loop indices * for the following loop in a manner equivalent to (static, N) * scheduling for OpenMP. For OpenMP use (dynamic, N) scheduling. */ #if !defined(SPEC) || defined(OPENMP) #pragma omp for schedule(dynamic, blocksize) #endif for (i = 0; i < prm->Natom; i++) { #if defined(MPI) || defined(SCALAPACK) if (!myroc(i, blocksize, numthreads, threadnum)) continue; #endif /* * Don't calculate derivatives of the effective radius of atom i * if atom i is frozen or if there are no pair atoms j associated * with atom i. */ npairs = lpears[i] + upears[i]; if ( frozen[i] || (npairs <= 0) ) continue; i34 = dim * i; xi = x[i34]; yi = x[i34 + 1]; zi = x[i34 + 2]; if (dim == 4) { wi = x[i34 + 3]; } ri = rborn[i] - BOFFSET; ri1i = 1. / ri; sumda = sumdeijda[i]; if (gb > 1) { ri = rborn[i] - BOFFSET; thi = tanh((gbalpha - gbbeta * psi[i] + gbgamma * psi[i] * psi[i]) * psi[i]); sumda *= (gbalpha - 2.0 * gbbeta * psi[i] + 3.0 * gbgamma * psi[i] * psi[i]) * (1.0 - thi * thi) * ri / rborn[i]; } /* Initialize the derivative accumulators. */ daix = daiy = daiz = daiw = 0.0; /* Select atom j from the pair list. Non-graceful error handling. 
*/ for (k = 0; k < npairs; k++) { if (pearlist[i] == NULL) { fprintf(nabout, "NULL pair list entry in egb loop 5, taskid = %d\n", mytaskid); fflush(nabout); } j = pearlist[i][k]; j34 = dim * j; xij = xi - x[j34]; yij = yi - x[j34 + 1]; zij = zi - x[j34 + 2]; r2 = xij * xij + yij * yij + zij * zij; if (dim == 4) { wij = wi - x[j34 + 3]; r2 += wij * wij; } /* Ignore the ij atom pair if their separation exceeds the GB cutoff. */ if (r2 > rgbmaxpsmax2) continue; dij1i = 1.0 / sqrt(r2); dij2i = dij1i * dij1i; dij = r2 * dij1i; sj = fs[j] * (rborn[j] - BOFFSET); sj2 = sj * sj; /* * The following are the numerator of the first derivatives of the * effective radius Ri with respect to the interatomic distance Dij. * They are derived from the equations from the Appendix of Schaefer * and Froemmel as well as from the Taylor series expansion for d>>s * by Andreas Svrcek-Seiler. The smooth rgbmax idea is from Andreas * Svrcek-Seiler and Alexey Onufriev. The complete derivative is * formed by multiplying the numerator by -Ri*Ri. The factor of Ri*Ri * has been moved to the terms that are multiplied by the derivative. * The negation is deferred until later. When the chain rule is used * to form the first derivatives of the effective radius with respect * to the cartesian coordinates, an additional factor of Dij appears * in the denominator. That factor is included in the following * expressions. */ if (dij > rgbmax + sj) continue; if (dij > rgbmax - sj) { temp1 = 1. / (dij - sj); dij3i = dij1i * dij2i; datmp = 0.125 * dij3i * ((r2 + sj2) * (temp1 * temp1 - rgbmax2i) - 2.0 * log(rgbmax * temp1)); } else if (dij > 4.0 * sj) { tmpsd = sj2 * dij2i; dumbo = TE + tmpsd * (TF + tmpsd * (TG + tmpsd * (TH + tmpsd * THH))); datmp = tmpsd * sj * dij2i * dij2i * dumbo; } else if (dij > ri + sj) { temp1 = 1. / (r2 - sj2); datmp = temp1 * sj * (-0.5 * dij2i + temp1) + 0.25 * dij1i * dij2i * log((dij - sj) / (dij + sj)); } else if (dij > fabs(ri - sj)) { temp1 = 1. 
/ (dij + sj); dij3i = dij2i * dij1i; datmp = -0.25 * (-0.5 * (r2 - ri * ri + sj2) * dij3i * ri1i * ri1i + dij1i * temp1 * (temp1 - dij1i) - dij3i * log(ri * temp1)); } else if (ri < sj) { temp1 = 1. / (r2 - sj2); datmp = -0.5 * (sj * dij2i * temp1 - 2. * sj * temp1 * temp1 - 0.5 * dij2i * dij1i * log((sj - dij) / (sj + dij))); } else { datmp = 0.; } /* Sum the derivatives into daix, daiy, daiz and daiw. */ daix += xij * datmp; daiy += yij * datmp; daiz += zij * datmp; if (dim == 4) { daiw += wij * datmp; } /* * Sum the derivatives relative to atom j (weighted by -sumdeijda[i]) * into the gradient vector. For example, f[j34 + 2] contains the * derivatives of Ri with respect to the z-coordinate of atom j. */ #if !defined(OPENMP) || !defined(NOREDUCE) datmp *= sumda; f[foff + j34] += xij * datmp; f[foff + j34 + 1] += yij * datmp; f[foff + j34 + 2] += zij * datmp; if (dim == 4) { f[foff + j34 + 3] += wij * datmp; } #endif } /* * Update the gradient vector with the sums of derivatives of the * effective radius Ri with respect to the cartesian coordinates. * For example, f[i34 + 1] contains the sum of derivatives of Ri * with respect to the y-coordinate of each atom. Multiply by * -sumdeijda[i] here (instead of merely using datmp multiplied by * -sumdeijda) in order to distribute the product across the sum of * derivatives in an attempt to obtain greater numeric stability. */ f[foff + i34] -= sumda * daix; f[foff + i34 + 1] -= sumda * daiy; f[foff + i34 + 2] -= sumda * daiz; if (dim == 4) { f[foff + i34 + 3] -= sumda * daiw; } } #if defined(OPENMP) && defined(NOREDUCE) /* * Compute the derivatives of the effective radius Ri of atom i * with respect to the cartesian coordinates of each atom j. Sum * all of these derivatives into the gradient vector. * * Loop over all atoms j. * * Because OPENMP and NOREDUCE are defined, this (j,i) loop nest will * update f[j34 + 0..3] only. 
* * Synchronization of OpenMP threads will occur following this loop * because of the '#pragma omp for'. No reduction of the gradient * array is necessary because it will occur in the mme34 function. * * For MPI or ScaLAPACK, explicitly assign tasks to loop indices * for the following loop in a manner equivalent to (static, N) * scheduling for OpenMP. For OpenMP use (dynamic, N) scheduling. */ #if !defined(SPEC) || defined(OPENMP) #pragma omp for schedule(dynamic, blocksize) #endif for (j = 0; j < prm->Natom; j++) { /* * Don't calculate derivatives of the effective radius of atom i * if there are no pair atoms j associated with atom i. */ npairs = lpears[j] + upears[j]; if (npairs <= 0) continue; j34 = dim * j; xj = x[j34]; yj = x[j34 + 1]; zj = x[j34 + 2]; if (dim == 4) { wj = x[j34 + 3]; } sj = fs[j] * (rborn[j] - BOFFSET); sj2 = sj * sj; /* Initialize the derivative accumulators. */ daix = daiy = daiz = daiw = 0.0; /* Select atom i from the pair list. Non-graceful error handling. */ for (k = 0; k < npairs; k++) { if (pearlist[j] == NULL) { printf("NULL pair list entry in egb loop 6, taskid = %d\n", mytaskid); fflush(stdout); } i = pearlist[j][k]; /* * Don't calculate derivatives of the effective radius of atom i * if atom i is frozen. */ if (frozen[i]) continue; i34 = dim * i; xij = x[i34] - xj; yij = x[i34 + 1] - yj; zij = x[i34 + 2] - zj; r2 = xij * xij + yij * yij + zij * zij; if (dim == 4) { wij = x[i34 + 3] - wj; r2 += wij * wij; } /* Ignore the ij atom pair if their separation exceeds the GB cutoff. */ if (r2 > rgbmaxpsmax2) continue; dij1i = 1.0 / sqrt(r2); dij2i = dij1i * dij1i; dij = r2 * dij1i; ri = rborn[i] - BOFFSET; ri1i = 1. / ri; /* * The following are the numerator of the first derivatives of the * effective radius Ri with respect to the interatomic distance Dij. * They are derived from the equations from the Appendix of Schaefer * and Froemmel as well as from the Taylor series expansion for d>>s * by Andreas Svrcek-Seiler. 
The smooth rgbmax idea is from Andreas * Svrcek-Seiler and Alexey Onufriev. The complete derivative is * formed by multiplying the numerator by -Ri*Ri. The factor of Ri*Ri * has been moved to the terms that are multiplied by the derivative. * The negation is deferred until later. When the chain rule is used * to form the first derivatives of the effective radius with respect * to the cartesian coordinates, an additional factor of Dij appears * in the denominator. That factor is included in the following * expressions. */ if (dij > rgbmax + sj) continue; if (dij > rgbmax - sj) { temp1 = 1. / (dij - sj); dij3i = dij1i * dij2i; datmp = 0.125 * dij3i * ((r2 + sj2) * (temp1 * temp1 - rgbmax2i) - 2.0 * log(rgbmax * temp1)); } else if (dij > 4.0 * sj) { tmpsd = sj2 * dij2i; dumbo = TE + tmpsd * (TF + tmpsd * (TG + tmpsd * (TH + tmpsd * THH))); datmp = tmpsd * sj * dij2i * dij2i * dumbo; } else if (dij > ri + sj) { temp1 = 1. / (r2 - sj2); datmp = temp1 * sj * (-0.5 * dij2i + temp1) + 0.25 * dij1i * dij2i * log((dij - sj) / (dij + sj)); } else if (dij > fabs(ri - sj)) { temp1 = 1. / (dij + sj); dij3i = dij2i * dij1i; datmp = -0.25 * (-0.5 * (r2 - ri * ri + sj2) * dij3i * ri1i * ri1i + dij1i * temp1 * (temp1 - dij1i) - dij3i * log(ri * temp1)); } else if (ri < sj) { temp1 = 1. / (r2 - sj2); datmp = -0.5 * (sj * dij2i * temp1 - 2. * sj * temp1 * temp1 - 0.5 * dij2i * dij1i * log((sj - dij) / (sj + dij))); } else { datmp = 0.; } /* * Because index i is retrieved from the pairlist array it is * not constrained to a particular range of values; therefore, * the threads that have loaded the sumdeijda array have been * synchronized above prior to the use of sumdeijda below. */ sumda = sumdeijda[i]; if (gb > 1) { ri = rborn[i] - BOFFSET; thi = tanh((gbalpha - gbbeta * psi[i] + gbgamma * psi[i] * psi[i]) * psi[i]); sumda *= (gbalpha - 2.0 * gbbeta * psi[i] + 3.0 * gbgamma * psi[i] * psi[i]) * (1.0 - thi * thi) * ri / rborn[i]; } /* Sum the derivatives into daix, daiy, daiz and daiw. 
*/ datmp *= sumda; daix += xij * datmp; daiy += yij * datmp; daiz += zij * datmp; if (dim == 4) { daiw += wij * datmp; } } /* * Update the gradient vector with the sums of derivatives of the * effective radius Ri with respect to the cartesian coordinates. * For example, f[j34 + 1] contains the sum of derivatives of Ri * with respect to the y-coordinate of each atom. */ f[j34] += daix; f[j34 + 1] += daiy; f[j34 + 2] += daiz; if (dim == 4) { f[j34 + 3] += daiw; } } #endif } /* Free the static arrays if static_arrays is 0. */ if (!static_arrays) { if (reff != NULL) { free_vector(reff, 0, natom); reff = NULL; } if (iexw != NULL) { free_ivector(iexw, -1, maxthreads*(natom+1)); iexw = NULL; } if (sumdeijda != NULL) { free_vector(sumdeijda, 0, numcopies*natom); sumdeijda = NULL; } if (psi != NULL) { free_vector(psi, 0, natom); psi = NULL; } if (reqack != NULL) { free_ivector(reqack, 0, numcopies); reqack = NULL; } #if defined(MPI) || defined(SCALAPACK) if (reductarr != NULL) { free_vector(reductarr, 0, natom); reductarr = NULL; } #endif } /* * Return elec, evdw and evdwnp through the parameters eelt, enb and enp. * These variables are computed in parallel. */ *eelt = elec; *enb = evdw; *enp = evdwnp; return (epol); } /*********************************************************************** MME34() ************************************************************************/ /* * Here is the mme function for 3D or 4D, depending upon the dim variable. 
* * Calling parameters are as follows: * * x - input: the atomic (x,y,z) coordinates * f - updated: the gradient vector * iter - the iteration counter, which if negative selects the following: * -1 print some energy values * -3 call egb to deallocate static arrays, then deallocate grad * -(any other value) normal execution */ static REAL_T mme34(REAL_T * x, REAL_T * f, int *iter) { extern REAL_T tconjgrad; REAL_T ebh, eba, eth, eta, eph, epa, enb, eel, enb14, eel14, ecn; REAL_T e_gb, esurf, evdwnp, frms; REAL_T ene[20]; REAL_T t1, t2; int i, j, k, goff, threadnum, numthreads, maxthreads; int iteration, mask, consumer, producer, numcopies; int dummy = 0; size_t n; static REAL_T *grad = NULL; static int *reqack = NULL; #if defined(MPI) || defined(SCALAPACK) int ierror; REAL_T reductarr[20]; #endif t1 = seconds(); n = (size_t) prm->Natom; /* * If OPENMP is defined, set maxthreads to the maximum number of * OpenMP threads then allocate the reqack array. Otherwise, * set maxthreads to 1. If NOREDUCE is not defined, set * numcopies to maxthreads; otherwise, set it to 1. */ #ifdef OPENMP maxthreads = omp_get_max_threads(); if (reqack == NULL) { reqack = ivector(0, maxthreads); } #else maxthreads = 1; #endif #ifndef NOREDUCE numcopies = maxthreads; #else numcopies = 1; #endif /* * If the iteration count equals -3, call egb to deallocate the * static arrays, deallocate the gradient array, then return; * otherwise, simply return. */ if (*iter == -3) { egb(lpairs, upairs, pairlist, lpairs, upairs, pairlist, x, grad, prm->Fs, prm->Rborn, prm->Charges, &kappa, &epsext, &enb, &eel, &esurf, &evdwnp, 1); if (grad != NULL) { free_vector(grad, 0, numcopies * dim * n); grad = NULL; } if (reqack != NULL) { free_ivector(reqack, 0, maxthreads); reqack = NULL; } return (0.0); } /* If the iteration count equals 0, print the header for task 0 only. 
*/ if (*iter == 0 && mytaskid == 0) { #if !defined(SPEC) || defined(SPEC_VERBOSE_OUTPUT) fprintf(nabout, " iter Total bad vdW elect" " nonpolar genBorn frms\n"); #else fprintf(nabout, " iter Total\n"); #endif fflush(nabout); } /* If the iteration count equals 0, initialize the timing variables. */ if (*iter == 0) { tnonb = tpair = tbond = tangl = tphi = tborn = tcons = tmme = 0.0; tconjgrad = tmd = treduce = 0.0; } /* * Write the checkpoint file every nchk iterations if the chknm * variable is non-NULL. */ if (chknm != NULL && (*iter > 0 && *iter % nchk == 0)) { checkpoint(chknm, prm->Natom, x, *iter); } /* * Build the non-bonded pair list if it hasn't already been built; * rebuild it every nsnb iterations. The non-bonded pair list * uses blocksize to group OpenMP thread to loop index, or MPI task * to loop index, mapping in the nblist and egb functions. It is * global and fully populated for OpenMP, and local and partially * populated for MPI and SCALAPACK. */ if (nb_pairs < 0 || (*iter > 0 && *iter % nsnb == 0)) { nb_pairs = nblist(lpairs, upairs, pairlist, x, dummy, 1, cut, prm->Natom, dim, frozen); t2 = seconds(); tpair += t2 - t1; t1 = t2; } /* * If OPENMP is defined and NOREDUCE is not defined, allocate a * gradient vector for each thread, and let each thread initialize * its gradient vector so that the "first touch" strategy will * allocate local memory. If OpenMP is not defined or if NOREDUCE * is defined, allocate one gradient vector only. * * Note: the following allocations assume that the dimensionality * of the problem does not change during one invocation of NAB. * If, for example, mme34 were called with dim==3 and then with dim==4, * these allocations would not be repeated for the larger value * of n that would be necessitated by dim==4. 
*/ if (grad == NULL) { grad = vector(0, numcopies * dim * n); } #if defined(OPENMP) && !defined(NOREDUCE) #pragma omp parallel private (i, goff) { goff = dim * n * omp_get_thread_num(); for (i = 0; i < dim * prm->Natom; i++) { grad[goff + i] = 0.0; } } #else for (i = 0; i < dim * prm->Natom; i++) { grad[i] = 0.0; } #endif t2 = seconds(); tmme += t2 - t1; t1 = t2; ebh = ebond(prm->Nbonh, prm->BondHAt1, prm->BondHAt2, prm->BondHNum, prm->Rk, prm->Req, x, grad); eba = ebond(prm->Mbona, prm->BondAt1, prm->BondAt2, prm->BondNum, prm->Rk, prm->Req, x, grad); ene[3] = ebh + eba; t2 = seconds(); tbond += t2 - t1; t1 = t2; eth = eangl(prm->Ntheth, prm->AngleHAt1, prm->AngleHAt2, prm->AngleHAt3, prm->AngleHNum, prm->Tk, prm->Teq, x, grad); eta = eangl(prm->Ntheta, prm->AngleAt1, prm->AngleAt2, prm->AngleAt3, prm->AngleNum, prm->Tk, prm->Teq, x, grad); ene[4] = eth + eta; t2 = seconds(); tangl += t2 - t1; t1 = t2; eph = ephi(prm->Nphih, prm->DihHAt1, prm->DihHAt2, prm->DihHAt3, prm->DihHAt4, prm->DihHNum, prm->Pk, prm->Pn, prm->Phase, x, grad); epa = ephi(prm->Mphia, prm->DihAt1, prm->DihAt2, prm->DihAt3, prm->DihAt4, prm->DihNum, prm->Pk, prm->Pn, prm->Phase, x, grad); ene[5] = eph + epa; ene[6] = 0.0; /* hbond term not in Amber-94 force field */ t2 = seconds(); tphi += t2 - t1; t1 = t2; /* In the following lpairs is a dummy argument that is not used. 
*/ nbond(lpairs, prm->N14pairs, N14pearlist, 1, x, grad, &enb14, &eel14, scnb, scee); ene[7] = enb14; ene[8] = eel14; t2 = seconds(); tnonb += t2 - t1; t1 = t2; if (e_debug) { EXPR("%9.3f", enb14); EXPR("%9.3f", eel14); } if (nconstrained) { ecn = econs(x, grad); t2 = seconds(); tcons += t2 - t1; t1 = t2; } else ecn = 0.0; ene[9] = ecn; if (gb) { e_gb = egb(lpairs, upairs, pairlist, lpairsnp, upairsnp, pairlistnp, x, grad, prm->Fs, prm->Rborn, prm->Charges, &kappa, &epsext, &enb, &eel, &esurf, &evdwnp, 0); t2 = seconds(); tborn += t2 - t1; t1 = t2; ene[1] = enb; ene[2] = eel; ene[10] = e_gb; ene[11] = esurf; ene[12] = evdwnp; if (e_debug) { EXPR("%9.3f", enb); EXPR("%9.3f", eel); EXPR("%9.3f", e_gb); EXPR("%9.3f", esurf); EXPR("%9.3f", evdwnp); } } else { nbond(lpairs, upairs, pairlist, 0, x, grad, &enb, &eel, 1.0, 1.0); t2 = seconds(); tnonb += t2 - t1; t1 = t2; ene[1] = enb; ene[2] = eel; ene[10] = 0.0; ene[11] = 0.0; ene[12] = 0.0; if (e_debug) { EXPR("%9.3f", enb); EXPR("%9.3f", eel); } } /* * Perform a reduction over the gradient vector if OPENMP is defined * and NOREDUCE is not defined, or if MPI is defined. * * If OPENMP is defined and NOREDUCE is not defined, the reduction * is performed either logarithmically or not. * * If MPI is defined, the reduction is performed by MPI_Allreduce. */ #if defined(OPENMP) && !defined(NOREDUCE) t1 = seconds(); #undef MME_LOGARITHMIC_REDUCTION #ifdef MME_LOGARITHMIC_REDUCTION /* * Here is the logarithmic reduction for OpenMP. * Initialize the reqack array. */ for (i = 0; i < maxthreads; i++) { reqack[i] = 0; } #pragma omp parallel \ private (i, iteration, mask, consumer, producer, threadnum, goff) { /* * If MME_OMP_FLUSH is not defined, synchronize the threads * via '#pragma omp barrier' which can be costly due to the * need to synchronize all of the threads. 
* * If MME_OMP_FLUSH is defined, four-cycle signaling will be * used to synchronize the threads, as will be seen below, * but here we need a '#pragma omp flush' so that the request * and acknowledge flags are read correctly by all of the threads. */ #define MME_OMP_FLUSH #ifndef MME_OMP_FLUSH #pragma omp barrier #else #pragma omp flush #endif /* * Calculate the iterations for the log2 reduction of the grad array. * Note that each OpenMP thread determines 'consumer' and 'producer' * from its thread number. */ threadnum = omp_get_thread_num(); iteration = maxthreads - 1; mask = 1; while (iteration > 0) { consumer = threadnum & (~mask); producer = consumer | ((mask + 1) >> 1); /* * 'Consumer' designates a thread to which to add data from a * 'producer' thread. Perform reduction only when both consumer * and producer are less than maxthreads. * * For successive iterations of the loop mask will have the values * 1, 3, 7, 15../ * * The for the example of maxThreads=14 (numThreads=0..13), the * following threads will be chosen by consumer and producer: * * (iteration 1, consumer) - 0, 2, 4, 6, 8, 10, 12 * (iteration 1, producer) - 1, 3, 5, 7, 9, 11, 13 * * (iteration 2, consumer) - 0, 4, 8, 12 * (iteration 2, producer) - 2, 6, 10, 13 * * (iteration 3, consumer) - 0, 8 * (iteration 3, producer) - 4, 12 * * (iteration 4, consumer) - 0 * (iteration 4, producer) - 8 * * As the example shows, the final result is found in the * grad array for thread 0. * * Note that the following if statement uses maxthreads to * determine whether to perform a reduction step. Maxthreads * may not be used to determine whether to execute the while loop * (above) because all threads must execute the while loop in order * that the '#pragma omp barrier' within the loop not hang due to * threads that are not executing the loop. The test for * (threadNum == consumer) guarantees that this thread will accept * data from a producer thread which has (threadNum == producer). 
* The test for (producer < maxthreads) guarantees that the producer * thread exists. */ if ( ( threadnum == consumer ) && ( producer < maxthreads ) ) { /* * If MME_OMP_FLUSH is defined, four-cycle signaling is used to * synchronize the threads. The 'consumer' thread raises its * request flag then waits for the 'producer' thread to raise * its acknowledge flag. * * The request flag is set by assigning the value of 'iteration' * to reqack[consumer]. The acknowledge flag is set by assigning * the value of iteration to reqack[producer]. This value * is used instead of 1 in order that each iteration of the loop * have a unique value for the request and acknowledge flags. * This approach avoids a race condition across iterations of * the loop for access to the request and acknowledge flags. * * If MME_OMP_FLUSH is not defined then no test is necessary * because '#pragma omp barrier' is used to resynchronize * all threads following each iteration of the while loop. */ #ifdef MME_OMP_FLUSH reqack[consumer] = iteration; #pragma omp flush do { #pragma omp flush } while (reqack[producer] != iteration); #endif /* * The producer and consumer threads are synchronized, * so add the grad array from the producer to the * grad array from the consumer. */ goff = dim * prm->Natom; for (i = 0; i < goff; i++) { grad[consumer * goff + i] += grad[producer * goff + i]; } /* * If MME_OMP_FLUSH is defined, four-cycle signaling is used to * synchronize the threads. The 'consumer' thread lowers its * request flag then waits for the 'producer' thread to lower * its acknowledge flag. */ #ifdef MME_OMP_FLUSH reqack[consumer] = 0; #pragma omp flush do { #pragma omp flush } while (reqack[producer] == iteration); #endif } /* * Here is the if statement that controls whether an OpenMP thread * is a producer. Note that because consumer never equals producer, * a thread cannot be both the consumer and producer during a given * iteration of the while loop. 
* * Because the grad array contents are copied by the consumer thread, * the producer thread needs only to synchronize via four-cycle * signaling. * * It is necessary not only to check that (threadnum == producer) * but also that (threadnum < maxthreads) to ensure that the * producer thread exists. */ if ( ( threadnum == producer ) && ( threadnum < maxthreads) ) { /* * If MME_OMP_FLUSH is defined, four-cycle signaling is used to * synchronize the threads. The 'producer' thread waits for * the 'consumer' thread to raise its request flag, then raises * its acknowledge flag, then waits for the 'consumer' thread * to lower its request flag (indicating that the 'consumer' * thread has read the data), then lowers its acknowledge flag. */ #ifdef MME_OMP_FLUSH do { #pragma omp flush } while (reqack[consumer] != iteration); reqack[producer] = iteration; #pragma omp flush do { #pragma omp flush } while (reqack[consumer] == iteration); reqack[producer] = 0; #pragma omp flush #endif } /* * If MME_OMP_FLUSH is not defined resynchronize via * '#pragma omp barrier'. */ #ifndef MME_OMP_FLUSH #pragma omp barrier #endif /* Prepare for the next iteration of the while loop. */ mask = (mask << 1) + 1; iteration >>= 1; } } /* Now copy the reduced grad array into the f array. */ for (i = 0; i < dim * prm->Natom; i++) { f[i] = grad[i]; } #else /* * Here is the non-logarithmic reduction of the grad array * under OpenMP. Begin by copying the grad array for thread 0 * into the f array. */ goff = dim * prm->Natom; for (i = 0; i < goff; i++) { f[i] = grad[i]; } /* * Now add the grad arrays for all other threads to the f array. * Each thread copies a portion of each array. The (j,i) loop * nesting is more efficient than (i,j) nesting. * * Note: the following 'if' should not be needed, but works around a * bug in ifort 9.1 on ia64. 
*/ if (maxthreads > 1) { #pragma omp parallel for private(i, j) schedule(dynamic, blocksize) for (j = 0; j < goff; j++) { for (i = 1; i < maxthreads; i++) { f[j] += grad[goff * i + j]; } } } #endif /* Update the reduction time. */ t2 = seconds(); treduce += t2 - t1; t1 = t2; #elif defined(MPI) || defined(SCALAPACK) /* Here is the reduction of the grad array under MPI. */ ierror = MPI_Allreduce(grad, f, dim * prm->Natom, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); if (ierror != MPI_SUCCESS) { fprintf( nabout,"Error in mme34 grad reduction, error = %d mytaskid = %d\n", ierror, mytaskid); } /* Update the reduction time. */ t2 = seconds(); treduce += t2 - t1; t1 = t2; #else /* Here is no reduction of the grad array. Copy it to the f array. */ for (i = 0; i < dim * prm->Natom; i++) { f[i] = grad[i]; } #endif for (k = 0; k < prm->Natom; k++) { /* zero out frozen forces */ if (frozen[k]) { f[dim * k + 0] = f[dim * k + 1] = f[dim * k + 2] = 0.0; if (dim == 4) { f[dim * k + 3] = 0.0; } } } #ifdef PRINT_DERIV k = 0; for (i = 0; i < 105; i++) { k++; fprintf(nabout, "%10.5f", f[i]); if (k % 8 == 0) fprintf(nabout, "\n"); } fprintf(nabout, "\n"); #endif /* Calculate the rms gradient. */ frms = 0.0; for (i = 0; i < dim * prm->Natom; i++) frms += f[i] * f[i]; frms = sqrt(frms / (dim * prm->Natom)); /* Calculate the total energy. */ ene[0] = 0.0; for (k = 1; k <= 12; k++) { ene[0] += ene[k]; } /* If MPI is defined perform a reduction of the ene array. */ #if defined(MPI) || defined(SCALAPACK) ierror = MPI_Allreduce(ene, reductarr, 13, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); if (ierror != MPI_SUCCESS) { fprintf(nabout, "Error in mme34 ene reduction, error = %d mytaskid = %d\n", ierror, mytaskid); } for (i = 0; i <= 12; i++) { ene[i] = reductarr[i]; } #endif /* * Print the energies and rms gradient but only for task zero, * and only for positive values of the iteration counter. 
*/ if (mytaskid == 0) { #if !defined(SPEC) || defined(SPEC_VERBOSE_OUTPUT) if (*iter > -1 && (*iter == 0 || *iter % ntpr == 0 || *iter == max_step)) { fprintf(nabout, "ff:%6d %9.2f %9.2f %9.2f %9.2f %9.2f %9.2f %9.2e\n", *iter, ene[0], ene[3] + ene[4] + ene[5], ene[1] + ene[7], ene[2] + ene[8], ene[9] + ene[11] + ene[12], ene[10], frms); fflush(nabout); } #else /* Only output the total for the first and last iterations */ if (*iter > -1 && (*iter == 0 || *iter == max_step)) { fprintf(nabout, "ff:%6d %9.2f\n", *iter, ene[0]); fflush(nabout); } #endif } /* A value of -1 for the iteration counter is reserved for printing. */ if (*iter == -1) { fprintf(nabout, " bond: %15.9f\n", ene[3]); fprintf(nabout, " angle: %15.9f\n", ene[4]); fprintf(nabout, " dihedral: %15.9f\n", ene[5]); fprintf(nabout, " enb14: %15.9f\n", ene[7]); fprintf(nabout, " eel14: %15.9f\n", ene[8]); fprintf(nabout, " enb: %15.9f\n", ene[1]); fprintf(nabout, " eel: %15.9f\n", ene[2]); fprintf(nabout, " egb: %15.9f\n", ene[10]); fprintf(nabout, " econs: %15.9f\n", ene[9]); fprintf(nabout, " esurf: %15.9f\n", ene[11]); fprintf(nabout, " Total: %15.9f\n", ene[0]); } /* If static_arrays is 0, deallocate the gradient and reqack arrays. */ if (!static_arrays) { if (grad != NULL) { free_vector(grad, 0, maxthreads * dim * n); grad = NULL; } if (reqack != NULL) { free_ivector(reqack, 0, maxthreads); reqack = NULL; } } t2 = seconds(); tmme += t2 - t1; return (ene[0]); } /*********************************************************************** MME_TIMER() ************************************************************************/ /* Print a timing summary but only for task zero. */ int mme_timer(void) { /* Use the maximum time from all MPI tasks or SCALAPACK processes. 
*/ #if defined(MPI) || defined(SCALAPACK) REAL_T timarr[10], reductarr[10]; timarr[0] = tcons; timarr[1] = tbond; timarr[2] = tangl; timarr[3] = tphi; timarr[4] = tpair; timarr[5] = tnonb; timarr[6] = tborn; timarr[7] = tmme; timarr[8] = tconjgrad; timarr[9] = tmd; MPI_Allreduce(timarr, reductarr, 10, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); tcons = reductarr[0]; tbond = reductarr[1]; tangl = reductarr[2]; tphi = reductarr[3]; tpair = reductarr[4]; tnonb = reductarr[5]; tborn = reductarr[6]; tmme = reductarr[7]; tconjgrad = reductarr[8]; tmd = reductarr[9]; #endif if (mytaskid == 0) { fprintf(nabout, "\nFirst derivative timing summary:\n"); fprintf(nabout, " constraints %10.2f\n", tcons); fprintf(nabout, " bonds %10.2f\n", tbond); fprintf(nabout, " angles %10.2f\n", tangl); fprintf(nabout, " torsions %10.2f\n", tphi); fprintf(nabout, " pairlist %10.2f\n", tpair); fprintf(nabout, " nonbonds %10.2f\n", tnonb); fprintf(nabout, " gen. Born %10.2f\n", tborn); fprintf(nabout, " mme %10.2f\n", tmme); fprintf(nabout, " Total %10.2f\n\n", tcons + tbond + tangl + tphi + tpair + tnonb + tborn + tmme); fprintf(nabout, " reduction %10.2f\n", treduce); fprintf(nabout, " molec. dyn. %10.2f\n\n", tmd); fflush(nabout); } return (0); }
GB_binop__plus_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__plus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__plus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__plus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__plus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__plus_fp32)
// A*D function (colscale):         GB (_AxD__plus_fp32)
// D*A function (rowscale):         GB (_DxB__plus_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__plus_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__plus_fp32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__plus_fp32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__plus_fp32)
// C=scalar+B                       GB (_bind1st__plus_fp32)
// C=scalar+B'                      GB (_bind1st_tran__plus_fp32)
// C=A+scalar                       GB (_bind2nd__plus_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__plus_fp32)

// C type:   float
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = (aij + bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x + y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (each GxB_NO_* flag is a compile-time switch from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_FP32 || GxB_NO_PLUS_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): all kernel bodies below are generated wrappers whose real
// work lives in the #include'd template files; only the type/operator macros
// defined above specialize them for PLUS on fp32.

void GB (_Cdense_ewise3_accum__plus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__plus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__plus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__plus_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__plus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__plus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__plus_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__plus_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__plus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__plus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__plus_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__plus_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap Bb (GBB is true for full B)
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__plus_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = (x + aij) ;               \
}

GrB_Info GB (_bind1st_tran__plus_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = (aij + y) ;               \
}

GrB_Info GB (_bind2nd_tran__plus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif  // GBCOMPACT
swap.c
#include "heads.h"
#include <math.h>   /* fabs(): abs() truncates a double to int */

/*
 * Find the pivot row for Gaussian elimination: among rows col..SIZE-1,
 * return the index of the row whose entry in column `col` has the largest
 * absolute value.  If every candidate entry is zero the matrix is singular:
 * the elapsed time is reported and the program exits ("Infinite number of
 * answers", matching the original behavior).
 *
 * nthreads == 1 runs a plain serial scan; nthreads > 1 splits the rows
 * cyclically across OpenMP threads and reduces the per-thread maxima.
 *
 * Fixes vs. the original:
 *  - abs() on a double truncated |value| to int; use fabs().
 *  - the serial accumulator `mx` was an int, losing fractional magnitudes.
 *  - per-thread maxima were int and "initialized" with
 *    memset(storage, INT_MIN, ...), which byte-fills and cannot produce
 *    INT_MIN (it fills with 0x00 here); use a double array set to 0.0,
 *    a valid identity since |value| >= 0.
 *  - storage/indexes were leaked; they are now freed on every exit path.
 */
int find_maxrow(int nthreads, int col)
{
    if (nthreads == 1) {
        double mx = 0.0;
        int idx = 0;
        for (int i = col; i < SIZE; i++) {
            double cur = fabs(matrix[i][col]);
            if (cur > mx) {
                mx = cur;
                idx = i;
            }
        }
        if (mx == 0) {
            /* Singular system: report timing and stop, as before. */
            finish_t = clock();
            total_t = (double)(finish_t - start_t) / CLOCKS_PER_SEC;
            printf("Infinite number of answers\n");
            printf("Spent time:%f \n", total_t);
            exit(0);
        }
        return idx;
    } else {
        /* Per-thread running maximum and the row index where it was found. */
        double *storage = (double *)malloc(nthreads * sizeof(double));
        int *indexes = (int *)malloc(nthreads * sizeof(int));
        if (storage == NULL || indexes == NULL) {
            printf("Out of memory\n");
            exit(1);
        }
        for (int i = 0; i < nthreads; i++) {
            storage[i] = 0.0;   /* |value| >= 0, so 0 is a safe identity */
            indexes[i] = i;
        }

        int jump = nthreads;
        omp_set_num_threads(nthreads);
#pragma omp parallel
        {
            /* Cyclic distribution: thread t scans rows t+col, t+col+jump, ... */
            int thread_id = omp_get_thread_num();
            for (int i = thread_id + col; i < SIZE; i += jump) {
                double cur = fabs(matrix[i][col]);
                if (cur > storage[thread_id]) {
                    indexes[thread_id] = i;
                    storage[thread_id] = cur;
                }
            }
        }

        /* Serial reduction of the per-thread maxima. */
        double mx = 0;
        int idx = 0;
        for (int i = 0; i < nthreads; i++) {
            if (storage[i] > mx) {
                idx = indexes[i];
                mx = storage[i];
            }
        }
        free(storage);   /* previously leaked */
        free(indexes);
        if (mx == 0) {
            finish_t = clock();
            total_t = (double)(finish_t - start_t) / CLOCKS_PER_SEC;
            printf("Infinite number of answers\n");
            printf("Spent time:%f \n", total_t);
            exit(0);
        }
        return idx;
    }
}

/* Exchange rows a and b of the coefficient matrix and of the RHS vector. */
void swap(int a, int b)
{
    if (a != b) {
        for (int i = 0; i < SIZE; i++) {
            double tmp = matrix[a][i];
            matrix[a][i] = matrix[b][i];
            matrix[b][i] = tmp;
        }
        double tmp = vec[a][0];
        vec[a][0] = vec[b][0];
        vec[b][0] = tmp;
    }
}
GB_unaryop__lnot_uint64_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint64_int8
// op(A') function:  GB_tran__lnot_uint64_int8

// C type:   uint64_t
// A type:   int8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT (1 if aij == 0, else 0)
#define GB_OP(z, x)   \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x)   \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint64_int8
(
    uint64_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each entry is independent, so the loop parallelizes trivially
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose work is done by the shared template, specialized by the
    // macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif  // GBCOMPACT
sw3d_traco128.h
void sw_traco3d_128(){ printf("- traco [1x8x128x16] - \n\n"); int c1,c2,c3,c4,c5,c6,c7,c8,c9,c11,c10,c12,c13,c14,c15,c32; /* #pragma omp parallel for shared(N) private(c1, c2,c3,c4,c5,c6,c7,c8,c9,c11,c10,c12,c13,c14,c15) for( c1 = 0; c1 <= (N - 1)/16; c1 += 1) for( c3 = 0; c3 <= (N - 1) / 16; c3 += 1) for( c5 = 0; c5 <= (N - 1) / 16; c5 += 1) for( c7 = 16 * c1 + 1; c7 <= min(N, 16 * c1 + 16); c7 += 1) for( c9 = 16 * c3 + 1; c9 <= min(N, 16 * c3 + 16); c9 += 1) for( c11 = 16 * c5 + 1; c11 <= min(N, 16 * c5 + 16); c11 += 1) { m1[c7][c9][c11] = INT_MIN; m2[c7][c9][c11] = INT_MIN; m3[c7][c9][c11] = INT_MIN; m4[c7][c9][c11] = INT_MIN; m5[c7][c9][c11] = INT_MIN; m6[c7][c9][c11] = INT_MIN; } for( c1 = 0; c1 <= floord(N - 1, 16); c1 += 1) #pragma omp parallel for shared(c1, N) private(c2,c3,c4,c5,c6,c7,c8,c9,c11,c10,c12,c13,c14,c15) schedule(dynamic, 1) for( c3 = max(0, c1 - (N + 31) / 32 + 1); c3 <= min(c1, (N - 1) / 32); c3 += 1) for( c5 = 0; c5 <= (N - 1) / 32; c5 += 1) for( c6 = 0; c6 <= 6; c6 += 1) { if (c6 == 6) { for( c9 = 32 * c1 - 32 * c3 + 1; c9 <= min(N, 32 * c1 - 32 * c3 + 32); c9 += 1) for( c11 = 32 * c3 + 1; c11 <= min(N, 32 * c3 + 32); c11 += 1) for( c13 = 32 * c5 + 1; c13 <= min(N, 32 * c5 + 32); c13 += 1) { for( c14 = max(0, 32 * c1 - 32 * c3 - c9 + 2); c14 <= min(1, -32 * c3 + c11 - 1); c14 += 1) { if (c14 == 1) { for( c15 = 1; c15 <= c11; c15 += 1) m2[c9][c11][c13] = MAX(m2[c9][c11][c13], H[c9][c11-c15][c13] + W[c15]); } else for( c15 = 1; c15 <= c9; c15 += 1) m1[c9][c11][c13] = MAX(m1[c9][c11][c13] ,H[c9-c15][c11][c13] + W[c15]); } for( c14 = max(2, 32 * c5 - c13 + 4); c14 <= min(min(3, -32 * c1 + 32 * c3 + c9 + 1), -32 * c3 + c11 + 1); c14 += 1) { if (c14 == 3) { for( c15 = 1; c15 <= min(c9, c11); c15 += 1) m4[c9][c11][c13] = MAX(m4[c9][c11][c13], H[c9-c15][c11-c15][c13] + W[c15]); } else for( c15 = 1; c15 <= c13; c15 += 1) m3[c9][c11][c13] = MAX(m3[c9][c11][c13], H[c9][c11][c13-c15] + W[c15]); } if (c13 >= 32 * c5 + 2) for( c14 = max(4, 32 * c3 - c11 
+ 6); c14 <= min(5, -32 * c1 + 32 * c3 + c9 + 3); c14 += 1) { if (c14 == 5) { for( c15 = 1; c15 <= min(c9, c13); c15 += 1) m6[c9][c11][c13] = MAX(m6[c9][c11][c13], H[c9-c15][c11][c13-c15] + W[c15]); } else for( c15 = 1; c15 <= min(c11, c13); c15 += 1) m5[c9][c11][c13] = MAX(m5[c9][c11][c13], H[c9][c11-c15][c13-c15] + W[c15]); } H[c9][c11][c13] = MAX(0, MAX( H[c9-1][c11-1][c13-1] + s(a[c9], b[c11], c[c13]), MAX(m1[c9][c11][c13], MAX(m2[c9][c11][c13], MAX(m3[c9][c11][c13], MAX(m4[c9][c11][c13], MAX(m5[c9][c11][c13], m6[c9][c11][c13]))))))); } } else if (c6 == 5) { for( c7 = 0; c7 <= min(c1 - c3, c5); c7 += 1) for( c9 = 32 * c1 - 32 * c3 + 1; c9 <= min(N, 32 * c1 - 32 * c3 + 32); c9 += 1) for( c11 = 32 * c3 + 1; c11 <= min(N, 32 * c3 + 32); c11 += 1) { if (32 * c3 + c9 >= 32 * c1 + 2) { for( c15 = 32 * c7 + 1; c15 <= min(min(32 * c5 + 1, 32 * c7 + 32), c9); c15 += 1) m6[c9][c11][(32*c5+1)] = MAX(m6[c9][c11][(32*c5+1)], H[c9-c15][c11][(32*c5+1)-c15] + W[c15]); } else for( c13 = 32 * c5 + 1; c13 <= min(N, 32 * c5 + 32); c13 += 1) for( c15 = 32 * c7 + 1; c15 <= min(min(32 * c1 - 32 * c3 + 1, 32 * c7 + 32), c13); c15 += 1) m6[(32*c1-32*c3+1)][c11][c13] = MAX(m6[(32*c1-32*c3+1)][c11][c13], H[(32*c1-32*c3+1)-c15][c11][c13-c15] + W[c15]); } } else if (c6 == 4) { for( c7 = 0; c7 <= min(c3, c5); c7 += 1) for( c9 = 32 * c1 - 32 * c3 + 1; c9 <= min(N, 32 * c1 - 32 * c3 + 32); c9 += 1) for( c11 = 32 * c3 + 1; c11 <= min(N, 32 * c3 + 32); c11 += 1) { if (c11 >= 32 * c3 + 2) { for( c15 = 32 * c7 + 1; c15 <= min(min(32 * c5 + 1, 32 * c7 + 32), c11); c15 += 1) m5[c9][c11][(32*c5+1)] = MAX(m5[c9][c11][(32*c5+1)], H[c9][c11-c15][(32*c5+1)-c15] + W[c15]); } else for( c13 = 32 * c5 + 1; c13 <= min(N, 32 * c5 + 32); c13 += 1) for( c15 = 32 * c7 + 1; c15 <= min(min(32 * c3 + 1, 32 * c7 + 32), c13); c15 += 1) m5[c9][(32*c3+1)][c13] = MAX(m5[c9][(32*c3+1)][c13], H[c9][(32*c3+1)-c15][c13-c15] + W[c15]); } } else if (c6 == 3) { for( c7 = 0; c7 <= min(c3, c1 - c3); c7 += 1) for( c9 = 32 * c1 - 
32 * c3 + 1; c9 <= min(N, 32 * c1 - 32 * c3 + 32); c9 += 1) { if (32 * c3 + c9 >= 32 * c1 + 2) { for( c13 = 32 * c5 + 1; c13 <= min(N, 32 * c5 + 32); c13 += 1) for( c15 = 32 * c7 + 1; c15 <= min(min(32 * c3 + 1, 32 * c7 + 32), c9); c15 += 1) m4[c9][(32*c3+1)][c13] = MAX(m4[c9][(32*c3+1)][c13], H[c9-c15][(32*c3+1)-c15][c13] + W[c15]); } else for( c11 = 32 * c3 + 1; c11 <= min(N, 32 * c3 + 32); c11 += 1) for( c13 = 32 * c5 + 1; c13 <= min(N, 32 * c5 + 32); c13 += 1) for( c15 = 32 * c7 + 1; c15 <= min(min(32 * c1 - 32 * c3 + 1, 32 * c7 + 32), c11); c15 += 1) m4[(32*c1-32*c3+1)][c11][c13] = MAX(m4[(32*c1-32*c3+1)][c11][c13], H[(32*c1-32*c3+1)-c15][c11-c15][c13] + W[c15]); } } else if (c6 == 2) { for( c7 = 0; c7 <= c5; c7 += 1) for( c9 = 32 * c1 - 32 * c3 + 1; c9 <= min(N, 32 * c1 - 32 * c3 + 32); c9 += 1) for( c11 = 32 * c3 + 1; c11 <= min(N, 32 * c3 + 32); c11 += 1) for( c15 = 32 * c7 + 1; c15 <= min(32 * c5 + 1, 32 * c7 + 32); c15 += 1) m3[c9][c11][(32*c5+1)] = MAX(m3[c9][c11][(32*c5+1)], H[c9][c11][(32*c5+1)-c15] + W[c15]); } else if (c6 == 1) { for( c7 = 0; c7 <= c3; c7 += 1) for( c9 = 32 * c1 - 32 * c3 + 1; c9 <= min(N, 32 * c1 - 32 * c3 + 32); c9 += 1) for( c13 = 32 * c5 + 1; c13 <= min(N, 32 * c5 + 32); c13 += 1) for( c15 = 32 * c7 + 1; c15 <= min(32 * c3 + 1, 32 * c7 + 32); c15 += 1) m2[c9][(32*c3+1)][c13] = MAX(m2[c9][(32*c3+1)][c13], H[c9][(32*c3+1)-c15][c13] + W[c15]); } else for( c7 = 0; c7 <= c1 - c3; c7 += 1) for( c11 = 32 * c3 + 1; c11 <= min(N, 32 * c3 + 32); c11 += 1) for( c13 = 32 * c5 + 1; c13 <= min(N, 32 * c5 + 32); c13 += 1) for( c15 = 32 * c7 + 1; c15 <= min(32 * c1 - 32 * c3 + 1, 32 * c7 + 32); c15 += 1) m1[(32*c1-32*c3+1)][c11][c13] = MAX(m1[(32*c1-32*c3+1)][c11][c13] ,H[(32*c1-32*c3+1)-c15][c11][c13] + W[c15]); } */ /* for( c1 = 0; c1 <= floord(N - 1, 4); c1 += 1) #pragma omp parallel for shared(c1, N) private(c2,c3,c4,c5,c6,c7,c8,c9,c11,c10,c12,c13,c14,c15) schedule(dynamic, 1) for( c3 = max(0, c1 - (N + 7) / 8 + 1); c3 <= min(c1, (N - 1) / 
8); c3 += 1) for( c5 = 0; c5 <= (N - 1) / 8; c5 += 1) for( c6 = 0; c6 <= 6; c6 += 1) { if (c6 == 6) { for( c9 = 8 * c1 - 8 * c3 + 1; c9 <= min(N, 8 * c1 - 8 * c3 + 8); c9 += 1) for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) for( c13 = 8 * c5 + 1; c13 <= min(N, 8 * c5 + 8); c13 += 1) { for( c14 = max(0, 8 * c1 - 8 * c3 - c9 + 2); c14 <= min(1, -8 * c3 + c11 - 1); c14 += 1) { if (c14 == 1) { for( c15 = 1; c15 <= c11; c15 += 1) m2[c9][c11][c13] = MAX(m2[c9][c11][c13], H[c9][c11-c15][c13] + W[c15]); } else for( c15 = 1; c15 <= c9; c15 += 1) m1[c9][c11][c13] = MAX(m1[c9][c11][c13] ,H[c9-c15][c11][c13] + W[c15]); } for( c14 = max(2, 8 * c5 - c13 + 4); c14 <= min(min(3, -8 * c1 + 8 * c3 + c9 + 1), -8 * c3 + c11 + 1); c14 += 1) { if (c14 == 3) { for( c15 = 1; c15 <= min(c9, c11); c15 += 1) m4[c9][c11][c13] = MAX(m4[c9][c11][c13], H[c9-c15][c11-c15][c13] + W[c15]); } else for( c15 = 1; c15 <= c13; c15 += 1) m3[c9][c11][c13] = MAX(m3[c9][c11][c13], H[c9][c11][c13-c15] + W[c15]); } if (c13 >= 8 * c5 + 2) for( c14 = max(4, 8 * c3 - c11 + 6); c14 <= min(5, -8 * c1 + 8 * c3 + c9 + 3); c14 += 1) { if (c14 == 5) { for( c15 = 1; c15 <= min(c9, c13); c15 += 1) m6[c9][c11][c13] = MAX(m6[c9][c11][c13], H[c9-c15][c11][c13-c15] + W[c15]); } else for( c15 = 1; c15 <= min(c11, c13); c15 += 1) m5[c9][c11][c13] = MAX(m5[c9][c11][c13], H[c9][c11-c15][c13-c15] + W[c15]); } H[c9][c11][c13] = MAX(0, MAX( H[c9-1][c11-1][c13-1] + s(a[c9], b[c11], c[c13]), MAX(m1[c9][c11][c13], MAX(m2[c9][c11][c13], MAX(m3[c9][c11][c13], MAX(m4[c9][c11][c13], MAX(m5[c9][c11][c13], m6[c9][c11][c13]))))))); } } else if (c6 == 5) { for( c7 = 0; c7 <= min(c1 - c3, c5); c7 += 1) for( c9 = 8 * c1 - 8 * c3 + 1; c9 <= min(N, 8 * c1 - 8 * c3 + 8); c9 += 1) for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) { if (8 * c3 + c9 >= 8 * c1 + 2) { for( c15 = 8 * c7 + 1; c15 <= min(min(8 * c5 + 1, 8 * c7 + 8), c9); c15 += 1) m6[c9][c11][(8*c5+1)] = MAX(m6[c9][c11][(8*c5+1)], H[c9-c15][c11][(8*c5+1)-c15] + 
W[c15]); } else for( c13 = 8 * c5 + 1; c13 <= min(N, 8 * c5 + 8); c13 += 1) for( c15 = 8 * c7 + 1; c15 <= min(min(8 * c1 - 8 * c3 + 1, 8 * c7 + 8), c13); c15 += 1) m6[(8*c1-8*c3+1)][c11][c13] = MAX(m6[(8*c1-8*c3+1)][c11][c13], H[(8*c1-8*c3+1)-c15][c11][c13-c15] + W[c15]); } } else if (c6 == 4) { for( c7 = 0; c7 <= min(c3, c5); c7 += 1) for( c9 = 8 * c1 - 8 * c3 + 1; c9 <= min(N, 8 * c1 - 8 * c3 + 8); c9 += 1) for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) { if (c11 >= 8 * c3 + 2) { for( c15 = 8 * c7 + 1; c15 <= min(min(8 * c5 + 1, 8 * c7 + 8), c11); c15 += 1) m5[c9][c11][(8*c5+1)] = MAX(m5[c9][c11][(8*c5+1)], H[c9][c11-c15][(8*c5+1)-c15] + W[c15]); } else for( c13 = 8 * c5 + 1; c13 <= min(N, 8 * c5 + 8); c13 += 1) for( c15 = 8 * c7 + 1; c15 <= min(min(8 * c3 + 1, 8 * c7 + 8), c13); c15 += 1) m5[c9][(8*c3+1)][c13] = MAX(m5[c9][(8*c3+1)][c13], H[c9][(8*c3+1)-c15][c13-c15] + W[c15]); } } else if (c6 == 3) { for( c7 = 0; c7 <= min(c3, c1 - c3); c7 += 1) for( c9 = 8 * c1 - 8 * c3 + 1; c9 <= min(N, 8 * c1 - 8 * c3 + 8); c9 += 1) { if (8 * c3 + c9 >= 8 * c1 + 2) { for( c13 = 8 * c5 + 1; c13 <= min(N, 8 * c5 + 8); c13 += 1) for( c15 = 8 * c7 + 1; c15 <= min(min(8 * c3 + 1, 8 * c7 + 8), c9); c15 += 1) m4[c9][(8*c3+1)][c13] = MAX(m4[c9][(8*c3+1)][c13], H[c9-c15][(8*c3+1)-c15][c13] + W[c15]); } else for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) for( c13 = 8 * c5 + 1; c13 <= min(N, 8 * c5 + 8); c13 += 1) for( c15 = 8 * c7 + 1; c15 <= min(min(8 * c1 - 8 * c3 + 1, 8 * c7 + 8), c11); c15 += 1) m4[(8*c1-8*c3+1)][c11][c13] = MAX(m4[(8*c1-8*c3+1)][c11][c13], H[(8*c1-8*c3+1)-c15][c11-c15][c13] + W[c15]); } } else if (c6 == 2) { for( c7 = 0; c7 <= c5; c7 += 1) for( c9 = 8 * c1 - 8 * c3 + 1; c9 <= min(N, 8 * c1 - 8 * c3 + 8); c9 += 1) for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) for( c15 = 8 * c7 + 1; c15 <= min(8 * c5 + 1, 8 * c7 + 8); c15 += 1) m3[c9][c11][(8*c5+1)] = MAX(m3[c9][c11][(8*c5+1)], H[c9][c11][(8*c5+1)-c15] + W[c15]); } else if 
(c6 == 1) { for( c7 = 0; c7 <= c3; c7 += 1) for( c9 = 8 * c1 - 8 * c3 + 1; c9 <= min(N, 8 * c1 - 8 * c3 + 8); c9 += 1) for( c13 = 8 * c5 + 1; c13 <= min(N, 8 * c5 + 8); c13 += 1) for( c15 = 8 * c7 + 1; c15 <= min(8 * c3 + 1, 8 * c7 + 8); c15 += 1) m2[c9][(8*c3+1)][c13] = MAX(m2[c9][(8*c3+1)][c13], H[c9][(8*c3+1)-c15][c13] + W[c15]); } else for( c7 = 0; c7 <= c1 - c3; c7 += 1) for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) for( c13 = 8 * c5 + 1; c13 <= min(N, 8 * c5 + 8); c13 += 1) for( c15 = 8 * c7 + 1; c15 <= min(8 * c1 - 8 * c3 + 1, 8 * c7 + 8); c15 += 1) m1[(8*c1-8*c3+1)][c11][c13] = MAX(m1[(8*c1-8*c3+1)][c11][c13] ,H[(8*c1-8*c3+1)-c15][c11][c13] + W[c15]); } /* for( c1 = 0; c1 < N + floord(N - 1, 8); c1 += 1) #pragma omp parallel for shared(c1, N) private(c2,c3,c4,c5,c6,c7,c8,c9,c11,c10,c12,c13,c14,c15) schedule(dynamic, 1) for( c3 = max(0, -N + c1 + 1); c3 <= min(c1, (N - 1) / 8); c3 += 1) for( c5 = 0; c5 <= (N - 1) / 128; c5 += 1) for( c6 = 0; c6 <= 6; c6 += 1) { if (c6 == 6) { for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) for( c13 = 128 * c5 + 1; c13 <= min(N, 128 * c5 + 128); c13 += 1) { for( c14 = max(1, 8 * c3 - c11 + 3); c14 <= min(2, -128 * c5 + c13); c14 += 1) { if (c14 == 2) { for( c15 = 1; c15 <= c13; c15 += 1) m3[(c1-c3+1)][c11][c13] = MAX(m3[(c1-c3+1)][c11][c13], H[(c1-c3+1)][c11][c13-c15] + W[c15]); } else for( c15 = 1; c15 <= c11; c15 += 1) m2[(c1-c3+1)][c11][c13] = MAX(m2[(c1-c3+1)][c11][c13], H[(c1-c3+1)][c11-c15][c13] + W[c15]); } if (c11 >= 8 * c3 + 2 && c13 >= 128 * c5 + 2) for( c15 = 1; c15 <= min(c11, c13); c15 += 1) m5[(c1-c3+1)][c11][c13] = MAX(m5[(c1-c3+1)][c11][c13], H[(c1-c3+1)][c11-c15][c13-c15] + W[c15]); H[(c1-c3+1)][c11][c13] = MAX(0, MAX( H[(c1-c3+1)-1][c11-1][c13-1] + s(a[(c1-c3+1)], b[c11], c[c13]), MAX(m1[(c1-c3+1)][c11][c13], MAX(m2[(c1-c3+1)][c11][c13], MAX(m3[(c1-c3+1)][c11][c13], MAX(m4[(c1-c3+1)][c11][c13], MAX(m5[(c1-c3+1)][c11][c13], m6[(c1-c3+1)][c11][c13]))))))); } } else if (c6 == 5) { for( 
c7 = 0; c7 <= min(8 * c5 + 7, (c1 - c3) / 16); c7 += 1) for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) for( c13 = max(128 * c5 + 1, 16 * c7 + 1); c13 <= min(N, 128 * c5 + 128); c13 += 1) for( c15 = 16 * c7 + 1; c15 <= min(min(c1 - c3 + 1, 16 * c7 + 16), c13); c15 += 1) m6[(c1-c3+1)][c11][c13] = MAX(m6[(c1-c3+1)][c11][c13], H[(c1-c3+1)-c15][c11][c13-c15] + W[c15]); } else if (c6 == 4) { for( c7 = 0; c7 <= min(8 * c5 + 7, c3 / 2); c7 += 1) { for( c13 = max(128 * c5 + 1, 16 * c7 + 1); c13 <= min(N, 128 * c5 + 128); c13 += 1) for( c15 = 16 * c7 + 1; c15 <= min(min(8 * c3 + 1, 16 * c7 + 16), c13); c15 += 1) m5[(c1-c3+1)][(8*c3+1)][c13] = MAX(m5[(c1-c3+1)][(8*c3+1)][c13], H[(c1-c3+1)][(8*c3+1)-c15][c13-c15] + W[c15]); for( c11 = 8 * c3 + 2; c11 <= min(N, 8 * c3 + 8); c11 += 1) for( c15 = 16 * c7 + 1; c15 <= min(min(128 * c5 + 1, 16 * c7 + 16), c11); c15 += 1) m5[(c1-c3+1)][c11][(128*c5+1)] = MAX(m5[(c1-c3+1)][c11][(128*c5+1)], H[(c1-c3+1)][c11-c15][(128*c5+1)-c15] + W[c15]); } } else if (c6 == 3) { for( c7 = 0; c7 <= min(c3 / 2, (c1 - c3) / 16); c7 += 1) for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) for( c13 = 128 * c5 + 1; c13 <= min(N, 128 * c5 + 128); c13 += 1) for( c15 = 16 * c7 + 1; c15 <= min(min(c1 - c3 + 1, 16 * c7 + 16), c11); c15 += 1) m4[(c1-c3+1)][c11][c13] = MAX(m4[(c1-c3+1)][c11][c13], H[(c1-c3+1)-c15][c11-c15][c13] + W[c15]); } else if (c6 == 2) { for( c7 = 0; c7 <= 8 * c5; c7 += 1) for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) for( c15 = 16 * c7 + 1; c15 <= min(128 * c5 + 1, 16 * c7 + 16); c15 += 1) m3[(c1-c3+1)][c11][(128*c5+1)] = MAX(m3[(c1-c3+1)][c11][(128*c5+1)], H[(c1-c3+1)][c11][(128*c5+1)-c15] + W[c15]); } else if (c6 == 1) { for( c7 = 0; c7 <= c3 / 2; c7 += 1) for( c13 = 128 * c5 + 1; c13 <= min(N, 128 * c5 + 128); c13 += 1) for( c15 = 16 * c7 + 1; c15 <= min(8 * c3 + 1, 16 * c7 + 16); c15 += 1) m2[(c1-c3+1)][(8*c3+1)][c13] = MAX(m2[(c1-c3+1)][(8*c3+1)][c13], H[(c1-c3+1)][(8*c3+1)-c15][c13] + W[c15]); } 
else for( c7 = 0; c7 <= (c1 - c3) / 16; c7 += 1) for( c11 = 8 * c3 + 1; c11 <= min(N, 8 * c3 + 8); c11 += 1) for( c13 = 128 * c5 + 1; c13 <= min(N, 128 * c5 + 128); c13 += 1) for( c15 = 16 * c7 + 1; c15 <= min(c1 - c3 + 1, 16 * c7 + 16); c15 += 1) m1[(c1-c3+1)][c11][c13] = MAX(m1[(c1-c3+1)][c11][c13] ,H[(c1-c3+1)-c15][c11][c13] + W[c15]); } */ /*for( c1 = 0; c1 <= floord(N - 1, 8); c1 += 1) #pragma omp parallel for shared(c1, N) private(c2,c3,c4,c5,c6,c7,c8,c9,c11,c10,c12,c13,c14,c15) schedule(dynamic, 1) for( c3 = max(0, c1 - (N + 15) / 16 + 1); c3 <= min(c1, (N - 1) / 16); c3 += 1) for( c5 = 0; c5 <= (N - 1) / 16; c5 += 1) for( c6 = 0; c6 <= 6; c6 += 1) { if (c6 == 6) { for( c9 = 16 * c1 - 16 * c3 + 1; c9 <= min(N, 16 * c1 - 16 * c3 + 16); c9 += 1) for( c11 = 16 * c3 + 1; c11 <= min(N, 16 * c3 + 16); c11 += 1) for( c13 = 16 * c5 + 1; c13 <= min(N, 16 * c5 + 16); c13 += 1) { for( c14 = max(0, 16 * c1 - 16 * c3 - c9 + 2); c14 <= min(1, -16 * c3 + c11 - 1); c14 += 1) { if (c14 == 1) { for( c32 = 1; c32 <= c11; c32 += 1) m2[c9][c11][c13] = MAX(m2[c9][c11][c13], H[c9][c11-c32][c13] + W[c32]); } else for( c32 = 1; c32 <= c9; c32 += 1) m1[c9][c11][c13] = MAX(m1[c9][c11][c13] ,H[c9-c32][c11][c13] + W[c32]); } for( c14 = max(2, 16 * c5 - c13 + 4); c14 <= min(min(3, -16 * c1 + 16 * c3 + c9 + 1), -16 * c3 + c11 + 1); c14 += 1) { if (c14 == 3) { for( c32 = 1; c32 <= min(c9, c11); c32 += 1) m4[c9][c11][c13] = MAX(m4[c9][c11][c13], H[c9-c32][c11-c32][c13] + W[c32]); } else for( c32 = 1; c32 <= c13; c32 += 1) m3[c9][c11][c13] = MAX(m3[c9][c11][c13], H[c9][c11][c13-c32] + W[c32]); } if (c13 >= 16 * c5 + 2) for( c14 = max(4, 16 * c3 - c11 + 6); c14 <= min(5, -16 * c1 + 16 * c3 + c9 + 3); c14 += 1) { if (c14 == 5) { for( c32 = 1; c32 <= min(c9, c13); c32 += 1) m6[c9][c11][c13] = MAX(m6[c9][c11][c13], H[c9-c32][c11][c13-c32] + W[c32]); } else for( c32 = 1; c32 <= min(c11, c13); c32 += 1) m5[c9][c11][c13] = MAX(m5[c9][c11][c13], H[c9][c11-c32][c13-c32] + W[c32]); } H[c9][c11][c13] = 
MAX(0, MAX( H[c9-1][c11-1][c13-1] + s(a[c9], b[c11], c[c13]), MAX(m1[c9][c11][c13], MAX(m2[c9][c11][c13], MAX(m3[c9][c11][c13], MAX(m4[c9][c11][c13], MAX(m5[c9][c11][c13], m6[c9][c11][c13]))))))); } } else if (c6 == 5) { for( c7 = 0; c7 <= min(c1 - c3, c5); c7 += 1) for( c9 = 16 * c1 - 16 * c3 + 1; c9 <= min(N, 16 * c1 - 16 * c3 + 16); c9 += 1) for( c11 = 16 * c3 + 1; c11 <= min(N, 16 * c3 + 16); c11 += 1) { if (16 * c3 + c9 >= 16 * c1 + 2) { for( c32 = 16 * c7 + 1; c32 <= min(min(16 * c5 + 1, 16 * c7 + 16), c9); c32 += 1) m6[c9][c11][(16*c5+1)] = MAX(m6[c9][c11][(16*c5+1)], H[c9-c32][c11][(16*c5+1)-c32] + W[c32]); } else for( c13 = 16 * c5 + 1; c13 <= min(N, 16 * c5 + 16); c13 += 1) for( c32 = 16 * c7 + 1; c32 <= min(min(16 * c1 - 16 * c3 + 1, 16 * c7 + 16), c13); c32 += 1) m6[(16*c1-16*c3+1)][c11][c13] = MAX(m6[(16*c1-16*c3+1)][c11][c13], H[(16*c1-16*c3+1)-c32][c11][c13-c32] + W[c32]); } } else if (c6 == 4) { for( c7 = 0; c7 <= min(c3, c5); c7 += 1) for( c9 = 16 * c1 - 16 * c3 + 1; c9 <= min(N, 16 * c1 - 16 * c3 + 16); c9 += 1) for( c11 = 16 * c3 + 1; c11 <= min(N, 16 * c3 + 16); c11 += 1) { if (c11 >= 16 * c3 + 2) { for( c32 = 16 * c7 + 1; c32 <= min(min(16 * c5 + 1, 16 * c7 + 16), c11); c32 += 1) m5[c9][c11][(16*c5+1)] = MAX(m5[c9][c11][(16*c5+1)], H[c9][c11-c32][(16*c5+1)-c32] + W[c32]); } else for( c13 = 16 * c5 + 1; c13 <= min(N, 16 * c5 + 16); c13 += 1) for( c32 = 16 * c7 + 1; c32 <= min(min(16 * c3 + 1, 16 * c7 + 16), c13); c32 += 1) m5[c9][(16*c3+1)][c13] = MAX(m5[c9][(16*c3+1)][c13], H[c9][(16*c3+1)-c32][c13-c32] + W[c32]); } } else if (c6 == 3) { for( c7 = 0; c7 <= min(c3, c1 - c3); c7 += 1) for( c9 = 16 * c1 - 16 * c3 + 1; c9 <= min(N, 16 * c1 - 16 * c3 + 16); c9 += 1) { if (16 * c3 + c9 >= 16 * c1 + 2) { for( c13 = 16 * c5 + 1; c13 <= min(N, 16 * c5 + 16); c13 += 1) for( c32 = 16 * c7 + 1; c32 <= min(min(16 * c3 + 1, 16 * c7 + 16), c9); c32 += 1) m4[c9][(16*c3+1)][c13] = MAX(m4[c9][(16*c3+1)][c13], H[c9-c32][(16*c3+1)-c32][c13] + W[c32]); } else for( 
c11 = 16 * c3 + 1; c11 <= min(N, 16 * c3 + 16); c11 += 1) for( c13 = 16 * c5 + 1; c13 <= min(N, 16 * c5 + 16); c13 += 1) for( c32 = 16 * c7 + 1; c32 <= min(min(16 * c1 - 16 * c3 + 1, 16 * c7 + 16), c11); c32 += 1) m4[(16*c1-16*c3+1)][c11][c13] = MAX(m4[(16*c1-16*c3+1)][c11][c13], H[(16*c1-16*c3+1)-c32][c11-c32][c13] + W[c32]); } } else if (c6 == 2) { for( c7 = 0; c7 <= c5; c7 += 1) for( c9 = 16 * c1 - 16 * c3 + 1; c9 <= min(N, 16 * c1 - 16 * c3 + 16); c9 += 1) for( c11 = 16 * c3 + 1; c11 <= min(N, 16 * c3 + 16); c11 += 1) for( c32 = 16 * c7 + 1; c32 <= min(16 * c5 + 1, 16 * c7 + 16); c32 += 1) m3[c9][c11][(16*c5+1)] = MAX(m3[c9][c11][(16*c5+1)], H[c9][c11][(16*c5+1)-c32] + W[c32]); } else if (c6 == 1) { for( c7 = 0; c7 <= c3; c7 += 1) for( c9 = 16 * c1 - 16 * c3 + 1; c9 <= min(N, 16 * c1 - 16 * c3 + 16); c9 += 1) for( c13 = 16 * c5 + 1; c13 <= min(N, 16 * c5 + 16); c13 += 1) for( c32 = 16 * c7 + 1; c32 <= min(16 * c3 + 1, 16 * c7 + 16); c32 += 1) m2[c9][(16*c3+1)][c13] = MAX(m2[c9][(16*c3+1)][c13], H[c9][(16*c3+1)-c32][c13] + W[c32]); } else for( c7 = 0; c7 <= c1 - c3; c7 += 1) for( c11 = 16 * c3 + 1; c11 <= min(N, 16 * c3 + 16); c11 += 1) for( c13 = 16 * c5 + 1; c13 <= min(N, 16 * c5 + 16); c13 += 1) for( c32 = 16 * c7 + 1; c32 <= min(16 * c1 - 16 * c3 + 1, 16 * c7 + 16); c32 += 1) m1[(16*c1-16*c3+1)][c11][c13] = MAX(m1[(16*c1-16*c3+1)][c11][c13] ,H[(16*c1-16*c3+1)-c32][c11][c13] + W[c32]); } */ }
dft_dft_solver.h
#ifndef _DFT_DFT_SOLVER_ #define _DFT_DFT_SOLVER_ #include <complex> #include "toefl/toefl.h" #include "blueprint.h" #include "equations.h" namespace toefl { /*! @brief Solver for periodic boundary conditions of the toefl equations. * @ingroup solvers */ template< size_t n> class DFT_DFT_Solver { public: typedef Matrix<double, TL_DFT> Matrix_Type; /*! @brief Construct a solver for periodic boundary conditions * * The constructor allocates storage for the solver * and initializes all fourier coefficients as well as * all low level solvers needed. * @param blueprint Contains all the necessary parameters. * @throw Message If your parameters are inconsistent. */ DFT_DFT_Solver( const Blueprint& blueprint); /*! @brief Prepare Solver for execution * * This function takes the fields and computes the missing * one according to the target parameter passed. * @param v Container with three non void matrices * @param t which Matrix is missing? */ void init( std::array< Matrix<double,TL_DFT>, n>& v, enum target t); /** * @brief Perform first initializing step * */ void first_step(); /** * @brief Perform second initializing step * * After that the step function can be used */ void second_step(); /*! @brief Perform a step by the 3 step Karniadakis scheme * * @attention At least one call of first_step() and second_step() is necessary * */ void step(){ step_<TL_ORDER3>();} /*! @brief Get the result You get the solution matrix of the current timestep. @param t The field you want @return A Read only reference to the field @attention The reference is only valid until the next call to the step() function! */ const Matrix<double, TL_DFT>& getField( enum target t) const; /*! @brief Get the result Use this function when you want to call step() without destroying the solution. @param m In exchange for the solution matrix you have to provide storage for further calculations. The field is swapped in. @param t The field you want. 
@attention The fields you get are not the ones of the current timestep. You get the fields that are not needed any more. This means the densities are 4 timesteps "old" whereas the potential is the one of the last timestep. */ void getField( Matrix<double, TL_DFT>& m, enum target t); const std::array<Matrix<double, TL_DFT>, n>& getDensity( )const{return dens;} const std::array<Matrix<double, TL_DFT>, n>& getPotential( )const{return phi;} /*! @brief Get the parameters of the solver. @return The parameters in use. @note You cannot change parameters once constructed. */ const Blueprint& blueprint() const { return blue;} private: typedef std::complex<double> complex; //methods void init_coefficients( const Boundary& bound, const Physical& phys); void compute_cphi();//multiply cphi double dot( const Matrix_Type& m1, const Matrix_Type& m2); template< enum stepper S> void step_(); //members const size_t rows, cols; const size_t crows, ccols; const Blueprint blue; /////////////////fields////////////////////////////////// //GhostMatrix<double, TL_DFT> ghostdens, ghostphi; std::array< Matrix<double, TL_DFT>, n> dens, phi, nonlinear; /////////////////Complex (void) Matrices for fourier transforms/////////// std::array< Matrix< complex>, n> cdens, cphi; ///////////////////Solvers//////////////////////// Arakawa arakawa; Karniadakis<n, complex, TL_DFT> karniadakis; DFT_DFT dft_dft; /////////////////////Coefficients////////////////////// Matrix< std::array< double, n> > phi_coeff; std::array< Matrix< double>, n-1> gamma_coeff; }; template< size_t n> DFT_DFT_Solver<n>::DFT_DFT_Solver( const Blueprint& bp): rows( bp.algorithmic().ny ), cols( bp.algorithmic().nx ), crows( rows), ccols( cols/2+1), blue( bp), //fields dens( MatrixArray<double, TL_DFT,n>::construct( rows, cols)), phi( dens), nonlinear( dens), cdens( MatrixArray<complex, TL_NONE, n>::construct( crows, ccols)), cphi(cdens), //Solvers arakawa( bp.algorithmic().h), karniadakis(rows, cols, crows, ccols, 
bp.algorithmic().dt), dft_dft( rows, cols, FFTW_MEASURE), //Coefficients phi_coeff( crows, ccols), gamma_coeff( MatrixArray< double, TL_NONE, n-1>::construct( crows, ccols)) { bp.consistencyCheck(); if( bp.isEnabled( TL_GLOBAL)) { std::cerr << "WARNING: GLOBAL solver not implemented yet! \n\ Switch to local solver...\n"; } init_coefficients( bp.boundary(), bp.physical()); } template< size_t n> void DFT_DFT_Solver<n>::init_coefficients( const Boundary& bound, const Physical& phys) { Matrix< QuadMat< complex, n> > coeff( crows, ccols); double laplace; int ik; const complex dymin( 0, 2.*M_PI/bound.ly); const double kxmin2 = 2.*2.*M_PI*M_PI/(double)(bound.lx*bound.lx), kymin2 = 2.*2.*M_PI*M_PI/(double)(bound.ly*bound.ly); Equations e( phys, blue.isEnabled( TL_MHW)); Poisson p( phys); // dft_dft is not transposing so i is the y index by default for( unsigned i = 0; i<crows; i++) for( unsigned j = 0; j<ccols; j++) { ik = (i>rows/2) ? (i-rows) : i; //integer division rounded down laplace = - kxmin2*(double)(j*j) - kymin2*(double)(ik*ik); if( n == 2) { gamma_coeff[0](i,j) = p.gamma1_i( laplace); } else if( n == 3) { gamma_coeff[0](i,j) = p.gamma1_i( laplace); gamma_coeff[1](i,j) = p.gamma1_z( laplace); } if( rows%2 == 0 && i == rows/2) ik = 0; e( coeff( i,j), laplace, (double)ik*dymin); if( laplace == 0) continue; p( phi_coeff(i,j), laplace); } //for periodic bc the constant is undefined for( unsigned k=0; k<n; k++) phi_coeff(0,0)[k] = 0; karniadakis.init_coeff( coeff, (double)(rows*cols)); } template< size_t n> void DFT_DFT_Solver<n>::init( std::array< Matrix<double, TL_DFT>,n>& v, enum target t) { //fourier transform input into cdens for( unsigned k=0; k<n; k++) { #ifdef TL_DEBUG if( v[k].isVoid()) throw Message("You gave me a void Matrix!!", _ping_); #endif dft_dft.r2c( v[k], cdens[k]); } //don't forget to normalize coefficients!! 
for( unsigned k=0; k<n; k++) for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols;j++) cdens[k](i,j) /= (double)(rows*cols); switch( t) //which field must be computed? { case( TL_ELECTRONS): //bring cdens and cphi in the right order swap_fields( cphi[0], cdens[n-1]); for( unsigned k=n-1; k>0; k--) swap_fields( cdens[k], cdens[k-1]); //now solve for cdens[0] for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols; j++) { cdens[0](i,j) = cphi[0](i,j)/phi_coeff(i,j)[0]; for( unsigned k=0; k<n && k!=0; k++) cdens[0](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[0]; } break; case( TL_IONS): //bring cdens and cphi in the right order swap_fields( cphi[0], cdens[n-1]); for( unsigned k=n-1; k>1; k--) swap_fields( cdens[k], cdens[k-1]); //solve for cdens[1] for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols; j++) { cdens[1](i,j) = cphi[0](i,j) /phi_coeff(i,j)[1]; for( unsigned k=0; k<n && k!=1; k++) cdens[1](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[1]; } break; case( TL_IMPURITIES): //bring cdens and cphi in the right order swap_fields( cphi[0], cdens[n-1]); for( unsigned k=n-1; k>2; k--) //i.e. 
never for n = 3 swap_fields( cdens[k], cdens[k-1]); //solve for cdens[2] for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols; j++) { cdens[2](i,j) = cphi[0](i,j) /phi_coeff(i,j)[2]; for( unsigned k=0; k<n && k!=2; k++) cdens[2](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[2]; } break; case( TL_POTENTIAL): //solve for cphi for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols; j++) { cphi[0](i,j) = 0; for( unsigned k=0; k<n && k!=2; k++) cphi[0](i,j) += cdens[k](i,j)*phi_coeff(i,j)[k]; } break; case( TL_ALL): throw Message( "TL_ALL not treated yet!", _ping_); } //compute the rest cphi[k] for( unsigned k=0; k<n-1; k++) for( size_t i = 0; i < crows; i++) for( size_t j = 0; j < ccols; j++) cphi[k+1](i,j) = gamma_coeff[k](i,j)*cphi[0](i,j); //backtransform to x-space for( unsigned k=0; k<n; k++) { //set (0,0) mode 0 again cdens[k](0,0) = 0; cphi[k](0,0) = 0; dft_dft.c2r( cdens[k], dens[k]); dft_dft.c2r( cphi[k], phi[k]); } //now the density and the potential is given in x-space //first_steps(); } template< size_t n> void DFT_DFT_Solver<n>::getField( Matrix<double, TL_DFT>& m, enum target t) { #ifdef TL_DEBUG if(m.isVoid()) throw Message( "You may not swap in a void Matrix!\n", _ping_); #endif switch( t) { case( TL_ELECTRONS): swap_fields( m, nonlinear[0]); break; case( TL_IONS): swap_fields( m, nonlinear[1]); break; case( TL_IMPURITIES): swap_fields( m, nonlinear[2]); break; case( TL_POTENTIAL): swap_fields( m, cphi[0]); break; case( TL_ALL): throw Message( "TL_ALL not allowed here", _ping_); } } template< size_t n> const Matrix<double, TL_DFT>& DFT_DFT_Solver<n>::getField( enum target t) const { Matrix<double, TL_DFT> const * m = 0; switch( t) { case( TL_ELECTRONS): m = &dens[0]; break; case( TL_IONS): m = &dens[1]; break; case( TL_IMPURITIES): m = &dens[2]; break; case( TL_POTENTIAL): m = &phi[0]; break; case( TL_ALL): throw Message( "TL_ALL not allowed here", _ping_); } return *m; } template< size_t n> void DFT_DFT_Solver<n>::first_step() { 
karniadakis.template invert_coeff<TL_EULER>( ); step_<TL_EULER>(); } template< size_t n> void DFT_DFT_Solver<n>::second_step() { karniadakis.template invert_coeff<TL_ORDER2>(); step_<TL_ORDER2>(); karniadakis.template invert_coeff<TL_ORDER3>(); } template< size_t n> void DFT_DFT_Solver<n>::compute_cphi() { if( n==2) { #pragma omp parallel for for( size_t i = 0; i < crows; i++){ for( size_t j = 0; j < ccols; j++) cphi[0](i,j) = phi_coeff(i,j)[0]*cdens[0](i,j) + phi_coeff(i,j)[1]*cdens[1](i,j); } //#pragma omp barrier #pragma omp parallel for for( size_t i = 0; i < crows; i++){ for( size_t j = 0; j < ccols; j++) cphi[1](i,j) = gamma_coeff[0](i,j)*cphi[0](i,j); } //#pragma omp barrier } else if( n==3) { #pragma omp parallel for for( size_t i = 0; i < crows; i++){ for( size_t j = 0; j < ccols; j++) cphi[0](i,j) = phi_coeff(i,j)[0]*cdens[0](i,j) + phi_coeff(i,j)[1]*cdens[1](i,j) + phi_coeff(i,j)[2]*cdens[2](i,j); } //#pragma omp barrier #pragma omp parallel for for( size_t i = 0; i < crows; i++){ for( size_t j = 0; j < ccols; j++) { cphi[1](i,j) = gamma_coeff[0](i,j)*cphi[0](i,j); cphi[2](i,j) = gamma_coeff[1](i,j)*cphi[0](i,j); } } //#pragma omp barrier } } template< size_t n> template< enum stepper S> void DFT_DFT_Solver<n>::step_() { //1. Compute nonlinearity #pragma omp parallel for for( unsigned k=0; k<n; k++) { GhostMatrix<double, TL_DFT> ghostdens{ rows, cols, TL_PERIODIC, TL_PERIODIC}; GhostMatrix<double, TL_DFT> ghostphi{ rows, cols, TL_PERIODIC, TL_PERIODIC}; swap_fields( dens[k], ghostdens); //now dens[k] is void swap_fields( phi[k], ghostphi); //now phi[k] is void ghostdens.initGhostCells( ); ghostphi.initGhostCells( ); arakawa( ghostdens, ghostphi, nonlinear[k]); swap_fields( dens[k], ghostdens); //now ghostdens is void swap_fields( phi[k], ghostphi); //now ghostphi is void } //2. perform karniadakis step karniadakis.template step_i<S>( dens, nonlinear); //3. solve linear equation //3.1. 
transform v_hut #pragma omp parallel for for( unsigned k=0; k<n; k++){ dft_dft.r2c( dens[k], cdens[k]);} //3.2. perform karniadaksi step and multiply coefficients for phi karniadakis.step_ii( cdens); compute_cphi(); //3.3. backtransform #pragma omp parallel for for( unsigned k=0; k<n; k++) { dft_dft.c2r( cdens[k], dens[k]); dft_dft.c2r( cphi[k], phi[k]); } } } //namespace toefl #endif //_DFT_DFT_SOLVER_
sink-3.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Test that we can handle multiple undeclared sink variables
   gracefully.  */

void bar (int *);

/* 'foo' runs an ordered(1) doacross loop whose sink clause names two
   undeclared identifiers ('poo' and 'paa').  The dg-error regex below
   checks that BOTH are diagnosed as undeclared, i.e. the compiler
   recovers after the first bad sink variable instead of bailing out.
   NOTE: 'j' is declared but unused on purpose; do not "clean up" this
   file — its exact shape is what the diagnostic test depends on.  */
void
foo ()
{
  int i,j;
#pragma omp parallel for ordered(1)
  for (i=0; i < 100; ++i)
    {
#pragma omp ordered depend(sink:poo-1,paa+1) /* { dg-error "poo.*declared.*paa.*declared" } */
      bar(&i);
#pragma omp ordered depend(source)
    }
}
vision.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V V IIIII SSSSS IIIII OOO N N % % V V I SS I O O NN N % % V V I SSS I O O N N N % % V V I SS I O O N NN % % V IIIII SSSSS IIIII OOO N N % % % % % % MagickCore Computer Vision Methods % % % % Software Design % % Cristy % % September 2014 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include 
"MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/opencl-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resource_.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/vision.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n n e c t e d C o m p o n e n t s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConnectedComponentsImage() returns the connected-components of the image % uniquely labeled. The returned connected components image colors member % defines the number of unique objects. Choose from 4 or 8-way connectivity. % % You are responsible for freeing the connected components objects resources % with this statement; % % objects = (CCObjectInfo *) RelinquishMagickMemory(objects); % % The format of the ConnectedComponentsImage method is: % % Image *ConnectedComponentsImage(const Image *image, % const size_t connectivity,CCObjectInfo **objects, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o connectivity: how many neighbors to visit, choose from 4 or 8. % % o objects: return the attributes of each unique object. % % o exception: return any errors or warnings in this structure. 
% */ static int CCObjectInfoCompare(const void *x,const void *y) { CCObjectInfo *p, *q; p=(CCObjectInfo *) x; q=(CCObjectInfo *) y; return((int) (q->area-(ssize_t) p->area)); } static void PerimeterThreshold(const Image *component_image, CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; RectangleInfo bounding_box; size_t pattern[4] = { 1, 0, 0, 0 }; ssize_t y; /* Compute perimeter of each object. */ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=(-1); y < (ssize_t) bounding_box.height+1; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1, bounding_box.y+y,bounding_box.width+2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=(-1); x < (ssize_t) bounding_box.width+1; x++) { Quantum pixels[4]; ssize_t v; size_t foreground; /* An Algorithm for Calculating Objects’ Shape Features in Binary Images, Lifeng He, Yuyan Chao. 
*/ foreground=0; for (v=0; v < 2; v++) { ssize_t u; for (u=0; u < 2; u++) { ssize_t offset; offset=v*(bounding_box.width+2)* GetPixelChannels(component_image)+u* GetPixelChannels(component_image); pixels[2*v+u]=GetPixelIndex(component_image,p+offset); if ((ssize_t) pixels[2*v+u] == i) foreground++; } } if (foreground == 1) pattern[1]++; else if (foreground == 2) { if ((((ssize_t) pixels[0] == i) && ((ssize_t) pixels[3] == i)) || (((ssize_t) pixels[1] == i) && ((ssize_t) pixels[2] == i))) pattern[0]++; /* diagonal */ else pattern[2]++; } else if (foreground == 3) pattern[3]++; p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[metric_index]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+ MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5); } } static void CircularityThreshold(const Image *component_image, CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; RectangleInfo bounding_box; size_t pattern[4] = { 1, 0, 0, 0 }; ssize_t y; /* Compute perimeter of each object. 
*/ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=(-1); y < (ssize_t) bounding_box.height; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1, bounding_box.y+y,bounding_box.width+2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=(-1); x < (ssize_t) bounding_box.width; x++) { Quantum pixels[4]; ssize_t v; size_t foreground; /* An Algorithm for Calculating Objects’ Shape Features in Binary Images, Lifeng He, Yuyan Chao. */ foreground=0; for (v=0; v < 2; v++) { ssize_t u; for (u=0; u < 2; u++) { ssize_t offset; offset=v*(bounding_box.width+2)* GetPixelChannels(component_image)+u* GetPixelChannels(component_image); pixels[2*v+u]=GetPixelIndex(component_image,p+offset); if ((ssize_t) pixels[2*v+u] == i) foreground++; } } if (foreground == 1) pattern[1]++; else if (foreground == 2) { if ((((ssize_t) pixels[0] == i) && ((ssize_t) pixels[3] == i)) || (((ssize_t) pixels[1] == i) && ((ssize_t) pixels[2] == i))) pattern[0]++; /* diagonal */ else pattern[2]++; } else if (foreground == 3) pattern[3]++; p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[metric_index]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+ MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5); object[i].metric[metric_index]=4.0*MagickPI*object[i].area/ (object[i].metric[metric_index]*object[i].metric[metric_index]); } } static void MajorAxisThreshold(const Image *component_image, CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) 
component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; ssize_t y; /* Compute ellipse major axis of each object. */ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[metric_index]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+ sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); } } static void MinorAxisThreshold(const Image *component_image, CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ 
magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; ssize_t y; /* Compute ellipse major axis of each object. */ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[metric_index]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)- sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); } } static void EccentricityThreshold(const Image *component_image, CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=MagickTrue; #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }, ellipse_axis = { 0.0, 0.0 }; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; ssize_t y; /* Compute eccentricity of each object. */ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); ellipse_axis.x=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+ sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); ellipse_axis.y=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)- sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); 
object[i].metric[metric_index]=sqrt(1.0-(ellipse_axis.y*ellipse_axis.y* PerceptibleReciprocal(ellipse_axis.x*ellipse_axis.x))); } } static void AngleThreshold(const Image *component_image, CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; ssize_t y; /* Compute ellipse angle of each object. */ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } 
p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[metric_index]=RadiansToDegrees(1.0/2.0*atan(2.0*M11* PerceptibleReciprocal(M20-M02))); if (fabs(M11) < 0.0) { if ((fabs(M20-M02) >= 0.0) && ((M20-M02) < 0.0)) object[i].metric[metric_index]+=90.0; } else if (M11 < 0.0) { if (fabs(M20-M02) >= 0.0) { if ((M20-M02) < 0.0) object[i].metric[metric_index]+=90.0; else object[i].metric[metric_index]+=180.0; } } else if ((fabs(M20-M02) >= 0.0) && ((M20-M02) < 0.0)) object[i].metric[metric_index]+=90.0; } } MagickExport Image *ConnectedComponentsImage(const Image *image, const size_t connectivity,CCObjectInfo **objects,ExceptionInfo *exception) { #define ConnectedComponentsImageTag "ConnectedComponents/Image" CacheView *component_view, *image_view, *object_view; CCObjectInfo *object; char *c; const char *artifact, *metrics[CCMaxMetrics]; double max_threshold, min_threshold; Image *component_image; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *equivalences; ssize_t i; size_t size; ssize_t background_id, connect4[2][2] = { { -1, 0 }, { 0, -1 } }, connect8[4][2] = { { -1, -1 }, { -1, 0 }, { -1, 1 }, { 0, -1 } }, dx, dy, first, last, n, step, y; /* Initialize connected components image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (objects != (CCObjectInfo **) NULL) *objects=(CCObjectInfo *) NULL; component_image=CloneImage(image,0,0,MagickTrue,exception); if (component_image == (Image *) NULL) return((Image *) NULL); component_image->depth=MAGICKCORE_QUANTUM_DEPTH; if (AcquireImageColormap(component_image,MaxColormapSize,exception) == MagickFalse) { component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize connected components equivalences. */ size=image->columns*image->rows; if (image->columns != (size/image->rows)) { component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } equivalences=AcquireMatrixInfo(size,1,sizeof(ssize_t),exception); if (equivalences == (MatrixInfo *) NULL) { component_image=DestroyImage(component_image); return((Image *) NULL); } for (n=0; n < (ssize_t) (image->columns*image->rows); n++) (void) SetMatrixElement(equivalences,n,0,&n); object=(CCObjectInfo *) AcquireQuantumMemory(MaxColormapSize,sizeof(*object)); if (object == (CCObjectInfo *) NULL) { equivalences=DestroyMatrixInfo(equivalences); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(object,0,MaxColormapSize*sizeof(*object)); for (i=0; i < (ssize_t) MaxColormapSize; i++) { object[i].id=i; object[i].bounding_box.x=(ssize_t) image->columns; object[i].bounding_box.y=(ssize_t) image->rows; GetPixelInfo(image,&object[i].color); } /* Find connected components. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); for (n=0; n < (ssize_t) (connectivity > 4 ? 
4 : 2); n++) { if (status == MagickFalse) continue; dx=connectivity > 4 ? connect8[n][1] : connect4[n][1]; dy=connectivity > 4 ? connect8[n][0] : connect4[n][0]; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y-1,image->columns,3,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } p+=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel, target; ssize_t neighbor_offset, obj, offset, ox, oy, root; /* Is neighbor an authentic pixel and a different color than the pixel? */ GetPixelInfoPixel(image,p,&pixel); if (((x+dx) < 0) || ((x+dx) >= (ssize_t) image->columns) || ((y+dy) < 0) || ((y+dy) >= (ssize_t) image->rows)) { p+=GetPixelChannels(image); continue; } neighbor_offset=dy*(GetPixelChannels(image)*image->columns)+dx* GetPixelChannels(image); GetPixelInfoPixel(image,p+neighbor_offset,&target); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { p+=GetPixelChannels(image); continue; } /* Resolve this equivalence. 
*/ offset=y*image->columns+x; neighbor_offset=dy*image->columns+dx; ox=offset; status=GetMatrixElement(equivalences,ox,0,&obj); while (obj != ox) { ox=obj; status=GetMatrixElement(equivalences,ox,0,&obj); } oy=offset+neighbor_offset; status=GetMatrixElement(equivalences,oy,0,&obj); while (obj != oy) { oy=obj; status=GetMatrixElement(equivalences,oy,0,&obj); } if (ox < oy) { status=SetMatrixElement(equivalences,oy,0,&ox); root=ox; } else { status=SetMatrixElement(equivalences,ox,0,&oy); root=oy; } ox=offset; status=GetMatrixElement(equivalences,ox,0,&obj); while (obj != root) { status=GetMatrixElement(equivalences,ox,0,&obj); status=SetMatrixElement(equivalences,ox,0,&root); } oy=offset+neighbor_offset; status=GetMatrixElement(equivalences,oy,0,&obj); while (obj != root) { status=GetMatrixElement(equivalences,oy,0,&obj); status=SetMatrixElement(equivalences,oy,0,&root); } status=SetMatrixElement(equivalences,y*image->columns+x,0,&root); p+=GetPixelChannels(image); } } } /* Label connected components. 
*/ n=0; component_view=AcquireAuthenticCacheView(component_image,exception); for (y=0; y < (ssize_t) component_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(component_view,0,y,component_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) component_image->columns; x++) { ssize_t id, offset; offset=y*image->columns+x; status=GetMatrixElement(equivalences,offset,0,&id); if (id != offset) status=GetMatrixElement(equivalences,id,0,&id); else { id=n++; if (id >= (ssize_t) MaxColormapSize) break; } status=SetMatrixElement(equivalences,offset,0,&id); if (x < object[id].bounding_box.x) object[id].bounding_box.x=x; if (x >= (ssize_t) object[id].bounding_box.width) object[id].bounding_box.width=(size_t) x; if (y < object[id].bounding_box.y) object[id].bounding_box.y=y; if (y >= (ssize_t) object[id].bounding_box.height) object[id].bounding_box.height=(size_t) y; object[id].color.red+=QuantumScale*GetPixelRed(image,p); object[id].color.green+=QuantumScale*GetPixelGreen(image,p); object[id].color.blue+=QuantumScale*GetPixelBlue(image,p); if (image->alpha_trait != UndefinedPixelTrait) object[id].color.alpha+=QuantumScale*GetPixelAlpha(image,p); if (image->colorspace == CMYKColorspace) object[id].color.black+=QuantumScale*GetPixelBlack(image,p); object[id].centroid.x+=x; object[id].centroid.y+=y; object[id].area++; SetPixelIndex(component_image,(Quantum) id,q); p+=GetPixelChannels(image); q+=GetPixelChannels(component_image); } if (n > (ssize_t) MaxColormapSize) break; if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; 
proceed=SetImageProgress(image,ConnectedComponentsImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } component_view=DestroyCacheView(component_view); image_view=DestroyCacheView(image_view); equivalences=DestroyMatrixInfo(equivalences); if (n > (ssize_t) MaxColormapSize) { object=(CCObjectInfo *) RelinquishMagickMemory(object); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"TooManyObjects"); } background_id=0; min_threshold=0.0; max_threshold=0.0; component_image->colors=(size_t) n; for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width-=(object[i].bounding_box.x-1); object[i].bounding_box.height-=(object[i].bounding_box.y-1); object[i].color.red/=(QuantumScale*object[i].area); object[i].color.green/=(QuantumScale*object[i].area); object[i].color.blue/=(QuantumScale*object[i].area); if (image->alpha_trait != UndefinedPixelTrait) object[i].color.alpha/=(QuantumScale*object[i].area); if (image->colorspace == CMYKColorspace) object[i].color.black/=(QuantumScale*object[i].area); object[i].centroid.x/=object[i].area; object[i].centroid.y/=object[i].area; max_threshold+=object[i].area; if (object[i].area > object[background_id].area) background_id=i; } max_threshold+=MagickEpsilon; n=(-1); artifact=GetImageArtifact(image,"connected-components:background-id"); if (artifact != (const char *) NULL) background_id=(ssize_t) StringToLong(artifact); artifact=GetImageArtifact(image,"connected-components:area-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max area threshold. 
*/ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].area < min_threshold) || (object[i].area >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:keep-colors"); if (artifact != (const char *) NULL) { const char *p; /* Keep selected objects based on color, merge others. */ for (i=0; i < (ssize_t) component_image->colors; i++) object[i].merge=MagickTrue; for (p=artifact; ; ) { char color[MagickPathExtent]; PixelInfo pixel; const char *q; for (q=p; *q != '\0'; q++) if (*q == ';') break; (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1, MagickPathExtent)); (void) QueryColorCompliance(color,AllCompliance,&pixel,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse) object[i].merge=MagickFalse; if (*q == '\0') break; p=q+1; } } artifact=GetImageArtifact(image,"connected-components:keep-ids"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"connected-components:keep"); if (artifact != (const char *) NULL) { /* Keep selected objects based on id, merge others. */ for (i=0; i < (ssize_t) component_image->colors; i++) object[i].merge=MagickTrue; for (c=(char *) artifact; *c != '\0'; ) { while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ',')) c++; first=(ssize_t) strtol(c,&c,10); if (first < 0) first+=(ssize_t) component_image->colors; last=first; while (isspace((int) ((unsigned char) *c)) != 0) c++; if (*c == '-') { last=(ssize_t) strtol(c+1,&c,10); if (last < 0) last+=(ssize_t) component_image->colors; } step=(ssize_t) (first > last ? -1 : 1); for ( ; first != (last+step); first+=step) object[first].merge=MagickFalse; } } artifact=GetImageArtifact(image,"connected-components:keep-top"); if (artifact != (const char *) NULL) { CCObjectInfo *top_objects; ssize_t top_ids; /* Keep top objects. 
*/ top_ids=(ssize_t) StringToLong(artifact); top_objects=(CCObjectInfo *) AcquireQuantumMemory(component_image->colors, sizeof(*top_objects)); if (top_objects == (CCObjectInfo *) NULL) { object=(CCObjectInfo *) RelinquishMagickMemory(object); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(top_objects,object,component_image->colors*sizeof(*object)); qsort((void *) top_objects,component_image->colors,sizeof(*top_objects), CCObjectInfoCompare); for (i=top_ids+1; i < (ssize_t) component_image->colors; i++) object[top_objects[i].id].merge=MagickTrue; top_objects=(CCObjectInfo *) RelinquishMagickMemory(top_objects); } artifact=GetImageArtifact(image,"connected-components:remove-colors"); if (artifact != (const char *) NULL) { const char *p; /* Remove selected objects based on color, keep others. */ for (p=artifact; ; ) { char color[MagickPathExtent]; PixelInfo pixel; const char *q; for (q=p; *q != '\0'; q++) if (*q == ';') break; (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1, MagickPathExtent)); (void) QueryColorCompliance(color,AllCompliance,&pixel,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse) object[i].merge=MagickTrue; if (*q == '\0') break; p=q+1; } } artifact=GetImageArtifact(image,"connected-components:remove-ids"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"connected-components:remove"); if (artifact != (const char *) NULL) for (c=(char *) artifact; *c != '\0'; ) { /* Remove selected objects based on id, keep others. 
*/ while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ',')) c++; first=(ssize_t) strtol(c,&c,10); if (first < 0) first+=(ssize_t) component_image->colors; last=first; while (isspace((int) ((unsigned char) *c)) != 0) c++; if (*c == '-') { last=(ssize_t) strtol(c+1,&c,10); if (last < 0) last+=(ssize_t) component_image->colors; } step=(ssize_t) (first > last ? -1 : 1); for ( ; first != (last+step); first+=step) object[first].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:perimeter-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max perimeter threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="perimeter"; PerimeterThreshold(image,object,n,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:circularity-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max circularity threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="circularity"; CircularityThreshold(image,object,n,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:diameter-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max diameter threshold. 
*/ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="diameter"; for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].metric[n]=ceil(sqrt(4.0*object[i].area/MagickPI)-0.5); if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } } artifact=GetImageArtifact(image,"connected-components:major-axis-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max ellipse major threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="major-axis"; MajorAxisThreshold(component_image,object,n,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:minor-axis-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max ellipse minor threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="minor-axis"; MinorAxisThreshold(component_image,object,n,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:eccentricity-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max eccentricity threshold. 
*/ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="eccentricy"; EccentricityThreshold(component_image,object,n,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:angle-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max ellipse angle threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="angle"; AngleThreshold(component_image,object,n,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } /* Merge any object not within the min and max area threshold. */ component_view=AcquireAuthenticCacheView(component_image,exception); object_view=AcquireVirtualCacheView(component_image,exception); for (i=0; i < (ssize_t) component_image->colors; i++) { RectangleInfo bounding_box; size_t id; ssize_t j; if (status == MagickFalse) continue; if ((object[i].merge == MagickFalse) || (i == background_id)) continue; /* keep object */ /* Merge this object. */ for (j=0; j < (ssize_t) component_image->colors; j++) object[j].census=0; bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) bounding_box.width; x++) { size_t k; if (status == MagickFalse) continue; j=(ssize_t) GetPixelIndex(component_image,p); if (j == i) for (k=0; k < (ssize_t) (connectivity > 4 ? 
4 : 2); k++) { const Quantum *q; /* Compute area of adjacent objects. */ if (status == MagickFalse) continue; dx=connectivity > 4 ? connect8[k][1] : connect4[k][1]; dy=connectivity > 4 ? connect8[k][0] : connect4[k][0]; q=GetCacheViewVirtualPixels(object_view,bounding_box.x+x+dx, bounding_box.y+y+dy,1,1,exception); if (q == (const Quantum *) NULL) { status=MagickFalse; break; } j=(ssize_t) GetPixelIndex(component_image,q); if (j != i) object[j].census++; } p+=GetPixelChannels(component_image); } } /* Merge with object of greatest adjacent area. */ id=0; for (j=1; j < (ssize_t) component_image->colors; j++) if (object[j].census > object[id].census) id=(size_t) j; object[i].area=0.0; for (y=0; y < (ssize_t) bounding_box.height; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,q) == i) SetPixelIndex(component_image,(Quantum) id,q); q+=GetPixelChannels(component_image); } if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse) status=MagickFalse; } } object_view=DestroyCacheView(object_view); component_view=DestroyCacheView(component_view); artifact=GetImageArtifact(image,"connected-components:mean-color"); if (IsStringTrue(artifact) != MagickFalse) { /* Replace object with mean color. */ for (i=0; i < (ssize_t) component_image->colors; i++) component_image->colormap[i]=object[i].color; } (void) SyncImage(component_image,exception); artifact=GetImageArtifact(image,"connected-components:verbose"); if ((IsStringTrue(artifact) != MagickFalse) || (objects != (CCObjectInfo **) NULL)) { /* Report statistics on each unique object. 
*/ for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width=0; object[i].bounding_box.height=0; object[i].bounding_box.x=(ssize_t) component_image->columns; object[i].bounding_box.y=(ssize_t) component_image->rows; object[i].centroid.x=0; object[i].centroid.y=0; object[i].census=object[i].area == 0.0 ? 0.0 : 1.0; object[i].area=0; } component_view=AcquireVirtualCacheView(component_image,exception); for (y=0; y < (ssize_t) component_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,0,y,component_image->columns, 1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) component_image->columns; x++) { size_t id; id=(size_t) GetPixelIndex(component_image,p); if (x < object[id].bounding_box.x) object[id].bounding_box.x=x; if (x > (ssize_t) object[id].bounding_box.width) object[id].bounding_box.width=(size_t) x; if (y < object[id].bounding_box.y) object[id].bounding_box.y=y; if (y > (ssize_t) object[id].bounding_box.height) object[id].bounding_box.height=(size_t) y; object[id].centroid.x+=x; object[id].centroid.y+=y; object[id].area++; p+=GetPixelChannels(component_image); } } for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width-=(object[i].bounding_box.x-1); object[i].bounding_box.height-=(object[i].bounding_box.y-1); object[i].centroid.x=object[i].centroid.x/object[i].area; object[i].centroid.y=object[i].centroid.y/object[i].area; } component_view=DestroyCacheView(component_view); qsort((void *) object,component_image->colors,sizeof(*object), CCObjectInfoCompare); if (objects == (CCObjectInfo **) NULL) { ssize_t j; artifact=GetImageArtifact(image, "connected-components:exclude-header"); if (IsStringTrue(artifact) == MagickFalse) { (void) fprintf(stdout,"Objects ("); artifact=GetImageArtifact(image, "connected-components:exclude-ids"); if (IsStringTrue(artifact) == 
              MagickFalse)
              (void) fprintf(stdout,"id: ");
            /* Column header: one extra column per metric enabled above. */
            (void) fprintf(stdout,"bounding-box centroid area mean-color");
            for (j=0; j <= n; j++)
              (void) fprintf(stdout," %s",metrics[j]);
            (void) fprintf(stdout,"):\n");
          }
        /* One report row per surviving object; census > 0 marks objects
           that still own at least one pixel. */
        for (i=0; i < (ssize_t) component_image->colors; i++)
          if (object[i].census > 0.0)
            {
              char
                mean_color[MagickPathExtent];

              GetColorTuple(&object[i].color,MagickFalse,mean_color);
              (void) fprintf(stdout,"  ");
              artifact=GetImageArtifact(image,
                "connected-components:exclude-ids");
              if (IsStringTrue(artifact) == MagickFalse)
                (void) fprintf(stdout,"%.20g: ",(double) object[i].id);
              (void) fprintf(stdout,
                "%.20gx%.20g%+.20g%+.20g %.1f,%.1f %.*g %s",(double)
                object[i].bounding_box.width,(double)
                object[i].bounding_box.height,(double)
                object[i].bounding_box.x,(double) object[i].bounding_box.y,
                object[i].centroid.x,object[i].centroid.y,
                GetMagickPrecision(),(double) object[i].area,mean_color);
              for (j=0; j <= n; j++)
                (void) fprintf(stdout," %.*g",GetMagickPrecision(),
                  object[i].metric[j]);
              (void) fprintf(stdout,"\n");
            }
        }
    }
  /* Either hand the object array to the caller (who then owns it) or
     release it here. */
  if (objects == (CCObjectInfo **) NULL)
    object=(CCObjectInfo *) RelinquishMagickMemory(object);
  else
    *objects=object;
  return(component_image);
}
axcrypt_fmt_plug.c
/* AxCrypt 1.x encrypted files cracker patch for JtR * 2016 by Fist0urs <eddy.maaalou at gmail.com>. * * This software is Copyright (c) 2016, Fist0urs <eddy.maaalou at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_axcrypt; #elif FMT_REGISTERS_H john_register_one(&fmt_axcrypt); #else #include <string.h> #include <stdint.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "dyna_salt.h" #include "sha.h" #include "aes.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "axcrypt" #define FORMAT_NAME "AxCrypt" #define FORMAT_TAG "$axcrypt$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA1 AES 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 /* actual max is 250 */ #define BINARY_SIZE 0 #define SALT_SIZE sizeof(struct custom_salt *) #define BINARY_ALIGN MEM_ALIGN_NONE #define SALT_ALIGN sizeof(struct custom_salt *) /* constant value recommended by FIPS */ #define AES_WRAPPING_IV "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6" #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define PUT_64BITS_XOR_MSB(cp, value) ( \ (cp)[0] ^= (unsigned char)((value)), \ (cp)[1] ^= (unsigned char)((value) >> 8), \ (cp)[2] ^= (unsigned char)((value) >> 16), \ (cp)[3] ^= (unsigned char)((value) >> 24 ) ) static struct fmt_tests axcrypt_tests[] = { /* formats can be: $axcrypt$*version*iterations*salt*wrappedkey $axcrypt$*version*iterations*salt*wrappedkey*key-file */ {"$axcrypt$*1*1337*0fd9e7e2f907f480f8af162564f8f94b*af10c88878ba4e2c89b12586f93b7802453121ee702bc362", "Bab00nmoNCo|\\|2$inge"}, 
{"$axcrypt$*1*60000*7522aa07694d441e47f8faad8a8cb984*95e02b7ccbdc27c227a80d1307505d8b769e87b32f312aa1", "nuNuche<3rewshauv"}, {"$axcrypt$*1*31014*3408ae91dddc0b1750ed4223fd843364*1cc0f8fa8d89f44d284d0562ac7e93848c86ce9605907129", "tr0pO$phere5apointzero"}, /* axcrypt created key-file */ {"$axcrypt$*1*38574*ce4f58c1e85df1ea921df6d6c05439b4*3278c3c730f7887b1008e852e59997e2196710a5c6bc1813*66664a6b2074434a4520374d73592055626979204a6b755520736d6b4b20394e694a205548444320524578562065674b33202f42593d", "0v3rgo2|<fc!"}, /* custom key-file */ {"$axcrypt$*1*130885*8eb4d745f7ac3f7505bcf14e8ce7e3b4*5221a6e8277e90b0b4f16f7871fca02986fca55c0dec5e59*22486520646f65736e2774206c696b652047656f726765204d69636861656c3a20426f6f6f6f6f6f220d0a0d0a49206665656c20736f20756e737572650d0a417320492074616b6520796f75722068616e6420616e64206c65616420796f7520746f207468652062616e6365666c6f6f720d0a417320746865206d75736963207374617274732c20736f6d657468696e6720696e20796f757220657965730d0a43616c6c7320746f206d696e642074686520676f6c64656e2073637265656e0d0a416e6420616c6c206974277320736169642069732068690d0a0d0a49276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f277265206e6f74206120666f6f6c0d0a0d0a53686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a54696d652063616e206e65766572206d656e640d0a54686520636172656c657373207768697370657273206f66206120676f6f6420667269656e640d0a546f2074686520686561727420616e64206d696e640d0a49676e6f72616e6365206973206b696e640d0a54686572652773206e6f20636f6d666f727420696e207468652074727574680d0a5061696e20697320616c6c20796f75276c6c2066696e640d0a0d0a49276d206e6576657220676f6e6e612064616e636520616761696e0
d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f75277265206e6f74206120666f6f6c0d0a0d0a492073686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a4e6576657220776974686f757420796f7572206c6f76650d0a0d0a546f6e6967687420746865206d75736963207365656d7320736f206c6f75640d0a492077697368207468617420776520636f756c64206c6f736520746869732063726f77640d0a4d617962652069742773206265747465722074686973207761790d0a5765276420687572742065616368206f74686572207769746820746865207468696e677320776527642077616e7420746f207361790d0a0d0a576520636f756c642068617665206265656e20736f20676f6f6420746f6765746865720d0a576520636f756c642068617665206c6976656420746869732064616e636520666f72657665720d0a427574206e6f772077686f277320676f6e6e612064616e63652077697468206d650d0a506c6561736520737461790d0a0d0a416e642049276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f75277265206e6f74206120666f6f6c0d0a0d0a53686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a284e6f77207468617420796f7527726520676f6e6529204e6f77207468617420796f7527726520676f6e650d0a284e6f77207468617420796f7527726520676f6e65292057686174204920646964277320736f2077726f6e672c20736f2077726f6e670d0a5468617420796f752068616420746f206c65617665206d6520616c6f6e65", "careless whisper"}, {NULL} }; static char (*saved_key) [PLAINTEXT_LENGTH + 1]; 
static int any_cracked, *cracked; static size_t cracked_size; static struct custom_salt { dyna_salt dsalt; int version; uint32_t key_wrapping_rounds; unsigned char salt[16]; unsigned char wrappedkey[24]; char* keyfile; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); any_cracked = 0; cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt; cracked = mem_calloc(cracked_size, 1); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p; char *ctcopy; char *keeptr; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; /* skip over "$axcrypt$*" */ if ((p = strtokm(ctcopy, "*")) == NULL) /* version */ goto err; if (!isdec(p)) goto err; if (!atoi(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; if (!isdec(p)) goto err; if (!atoi(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if (strlen(p) != 32 || !ishexlc(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* wrappedkey */ goto err; if (strlen(p) != 48 || !ishexlc(p)) goto err; /* optional key-file following */ MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; static void *ptr; cs.keyfile = NULL; ctcopy += FORMAT_TAG_LEN; /* skip over "$axcrypt$*" */ p = strtokm(ctcopy, "*"); cs.version = atoi(p); p = strtokm(NULL, "*"); cs.key_wrapping_rounds = (uint32_t) atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < 16; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 
+ 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 24; i++) cs.wrappedkey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; /* if key-file present */ if ((p = strtokm(NULL, "*")) != NULL){ cs.keyfile = (char*) mem_calloc_tiny(strlen(p)/2 + 1, sizeof(char)); for (i = 0; i < strlen(p)/2; i++) cs.keyfile[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; } MEM_FREE(keeptr); cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, salt); cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, salt, wrappedkey, 0); cs.dsalt.salt_alloc_needs_free = 0; ptr = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD); memcpy(ptr, &cs, sizeof(struct custom_salt)); return (void *) &ptr; } static void set_salt(void *salt) { cur_salt = *(struct custom_salt **) salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { /* NUMBER_AES_BLOCKS = 2 AES_BLOCK_SIZE = 16 */ unsigned char KEK[20], lsb[24], cipher[16]; AES_KEY akey; SHA_CTX ctx; int i, j, nb_iterations = cur_salt->key_wrapping_rounds; SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char *) saved_key[index], strlen(saved_key[index])); /* if key-file provided */ if (cur_salt->keyfile != NULL) SHA1_Update(&ctx, (unsigned char *) cur_salt->keyfile, strlen(cur_salt->keyfile)); SHA1_Final( KEK, &ctx ); /* hash XOR salt => KEK */ for (i = 0; i < sizeof(cur_salt->salt); i++) KEK[i] ^= cur_salt->salt[i]; memcpy(lsb, cur_salt->wrappedkey + 8, 16); memset(&akey, 0, sizeof(AES_KEY)); AES_set_decrypt_key(KEK, 128, &akey); /* set msb */ memcpy(cipher, cur_salt->wrappedkey, 8); /* custom AES un-wrapping loop */ for (j = nb_iterations - 1; j >= 0; j--) { /* 1st block treatment */ /* MSB XOR (NUMBER_AES_BLOCKS * j + i) */ PUT_64BITS_XOR_MSB(cipher, 2 * j + 2); /* R[i] */ 
memcpy(cipher + 8, lsb + 8, 8); /* AES_ECB(KEK, (MSB XOR (NUMBER_AES_BLOCKS * j + i)) | R[i]) */ AES_decrypt(cipher, cipher, &akey); memcpy(lsb + 8, cipher + 8, 8); /* 2nd block treatment */ PUT_64BITS_XOR_MSB(cipher, 2 * j + 1); memcpy(cipher + 8, lsb, 8); AES_decrypt(cipher, cipher, &akey); memcpy(lsb, cipher + 8, 8); } if (!memcmp(cipher, AES_WRAPPING_IV, 8)) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return cracked[index]; } static void axcrypt_set_key(char *key, int index) { int saved_len = strlen(key); memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_axcrypt = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT, { NULL }, { FORMAT_TAG }, axcrypt_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_dyna_salt_hash, NULL, set_salt, axcrypt_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif
ecn2_opt.c
/* * MIRACL E(F_p^2) support functions * mrecn2.c * */ #include <stdlib.h> #include "miracl.h" #ifdef MR_STATIC #include <string.h> #endif static inline void zzn2_div2_i(zzn2 *w) { moddiv2(w->a->w); w->a->len=2; moddiv2(w->b->w); w->b->len=2; } static inline void zzn2_tim2_i(zzn2 *w) { #ifdef MR_COUNT_OPS fpa+=2; #endif modtim2(w->a->w); modtim2(w->b->w); w->a->len=2; w->b->len=2; } static inline void zzn2_tim3_i(zzn2 *w) { #ifdef MR_COUNT_OPS fpa+=4; #endif modtim3(w->a->w); modtim3(w->b->w); w->a->len=2; w->b->len=2; } static inline void zzn2_copy_i(zzn2 *x,zzn2 *w) { if (x==w) return; w->a->len=x->a->len; w->a->w[0]=x->a->w[0]; w->a->w[1]=x->a->w[1]; w->b->len=x->b->len; w->b->w[0]=x->b->w[0]; w->b->w[1]=x->b->w[1]; } static inline void zzn2_add_i(zzn2 *x,zzn2 *y,zzn2 *w) { #ifdef MR_COUNT_OPS fpa+=2; #endif modadd(x->a->w,y->a->w,w->a->w); modadd(x->b->w,y->b->w,w->b->w); w->a->len=2; w->b->len=2; } static inline void zzn2_sub_i(zzn2 *x,zzn2 *y,zzn2 *w) { #ifdef MR_COUNT_OPS fpa+=2; #endif modsub(x->a->w,y->a->w,w->a->w); modsub(x->b->w,y->b->w,w->b->w); w->a->len=2; w->b->len=2; } static inline void zzn2_timesi_i(zzn2 *u) { mr_small w1[2]; w1[0]=u->a->w[0]; w1[1]=u->a->w[1]; u->a->w[0]=u->b->w[0]; u->a->w[1]=u->b->w[1]; modneg(u->a->w); u->b->w[0]=w1[0]; u->b->w[1]=w1[1]; } static inline void zzn2_txx_i(zzn2 *u) { /* multiply w by t^2 where x^2-t is irreducible polynomial for ZZn4 for p=5 mod 8 t=sqrt(sqrt(-2)), qnr=-2 for p=3 mod 8 t=sqrt(1+sqrt(-1)), qnr=-1 for p=7 mod 8 and p=2,3 mod 5 t=sqrt(2+sqrt(-1)), qnr=-1 */ zzn2 t; struct bigtype aa,bb; big a,b; mr_small w3[2],w4[2]; a=&aa; b=&bb; a->len=2; b->len=2; a->w=w3; b->w=w4; t.a=a; t.b=b; zzn2_copy_i(u,&t); zzn2_timesi_i(u); zzn2_add_i(u,&t,u); zzn2_add_i(u,&t,u); u->a->len=2; u->b->len=2; } static inline void zzn2_pmul_i(int i,zzn2 *x) { modpmul(i,x->a->w); modpmul(i,x->b->w); } static inline void zzn2_sqr_i(zzn2 *x,zzn2 *w) { static mr_small w1[2],w2[2]; #ifdef MR_COUNT_OPS fpa+=3; fpc+=2; #endif 
modadd(x->a->w,x->b->w,w1); modsub(x->a->w,x->b->w,w2); modmult(x->a->w,x->b->w,w->b->w); modmult(w1,w2,w->a->w); // routine that calculates (a+b)(a-b) ?? modtim2(w->b->w); w->a->len=2; w->b->len=2; } static inline void zzn2_dblsub_i(zzn2 *x,zzn2 *y,zzn2 *w) { #ifdef MR_COUNT_OPS fpa+=4; #endif moddblsub(w->a->w,x->a->w,y->a->w); moddblsub(w->b->w,x->b->w,y->b->w); w->a->len=2; w->b->len=2; } static inline void zzn2_mul_i(zzn2 *x,zzn2 *y,zzn2 *w) { static mr_small w1[2],w2[2],w5[2]; #ifdef MR_COUNT_OPS fpa+=5; fpc+=3; #endif /*#pragma omp parallel sections { #pragma omp section */ modmult(x->a->w,y->a->w,w1); /* #pragma omp section */ modmult(x->b->w,y->b->w,w2); /*}*/ modadd(x->a->w,x->b->w,w5); modadd(y->a->w,y->b->w,w->b->w); modmult(w->b->w,w5,w->b->w); moddblsub(w->b->w,w1,w2); /* w->b->w - w1 -w2 */ modsub(w1,w2,w->a->w); w->a->len=2; w->b->len=2; } void zzn2_inv_i(_MIPD_ zzn2 *w) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; #ifdef MR_COUNT_OPS fpc+=4; fpa+=1; #endif MR_IN(163) modsqr(w->a->w,mr_mip->w1->w); modsqr(w->b->w,mr_mip->w2->w); modadd(mr_mip->w1->w,mr_mip->w2->w,mr_mip->w1->w); mr_mip->w1->len=2; /* redc(_MIPP_ mr_mip->w1,mr_mip->w6); */ copy(mr_mip->w1,mr_mip->w6); xgcd(_MIPP_ mr_mip->w6,mr_mip->modulus,mr_mip->w6,mr_mip->w6,mr_mip->w6); /* nres(_MIPP_ mr_mip->w6,mr_mip->w6); */ modmult(w->a->w,mr_mip->w6->w,w->a->w); modneg(mr_mip->w6->w); modmult(w->b->w,mr_mip->w6->w,w->b->w); MR_OUT } BOOL nres_sqroot(_MIPD_ big x,big w) { /* w=sqrt(x) mod p. This depends on p being prime! 
*/ int i,t,js; #ifdef MR_COUNT_OPS fpc+=125; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return FALSE; copy(x,w); if (size(w)==0) return TRUE; copy(w,mr_mip->w1); for (i=0;i<25;i++) { modsqr(w->w,w->w); modsqr(w->w,w->w); modsqr(w->w,w->w); modsqr(w->w,w->w); modsqr(w->w,w->w); } w->len=2; modsqr(w->w,mr_mip->w2->w); mr_mip->w2->len=2; if (mr_compare(mr_mip->w1,mr_mip->w2)!=0) {zero(w);return FALSE;} return TRUE; } BOOL zzn2_sqrt(_MIPD_ zzn2 *u,zzn2 *w) { /* sqrt(a+ib) = sqrt(a+sqrt(a*a-n*b*b)/2)+ib/(2*sqrt(a+sqrt(a*a-n*b*b)/2)) where i*i=n */ #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif #ifdef MR_COUNT_OPS fpc+=2; fpa+=1; #endif if (mr_mip->ERNUM) return FALSE; zzn2_copy(u,w); if (zzn2_iszero(w)) return TRUE; MR_IN(204) modsqr(w->b->w,mr_mip->w7->w); modsqr(w->a->w,mr_mip->w1->w); modadd(mr_mip->w1->w,mr_mip->w7->w,mr_mip->w7->w); mr_mip->w7->len=2; // nres_modmult(_MIPP_ w->b,w->b,mr_mip->w7); // nres_modmult(_MIPP_ w->a,w->a,mr_mip->w1); // nres_modadd(_MIPP_ mr_mip->w7,mr_mip->w1,mr_mip->w7); if (!nres_sqroot(_MIPP_ mr_mip->w7,mr_mip->w7)) /* s=w7 */ { zzn2_zero(w); MR_OUT return FALSE; } #ifdef MR_COUNT_OPS fpa+=1; #endif modadd(w->a->w,mr_mip->w7->w,mr_mip->w15->w); moddiv2(mr_mip->w15->w); mr_mip->w15->len=2; // nres_modadd(_MIPP_ w->a,mr_mip->w7,mr_mip->w15); // nres_div2(_MIPP_ mr_mip->w15,mr_mip->w15); if (!nres_sqroot(_MIPP_ mr_mip->w15,mr_mip->w15)) { #ifdef MR_COUNT_OPS fpa+=1; #endif modsub(w->a->w,mr_mip->w7->w,mr_mip->w15->w); moddiv2(mr_mip->w15->w); mr_mip->w15->len=2; // nres_modsub(_MIPP_ w->a,mr_mip->w7,mr_mip->w15); // nres_div2(_MIPP_ mr_mip->w15,mr_mip->w15); if (!nres_sqroot(_MIPP_ mr_mip->w15,mr_mip->w15)) { zzn2_zero(w); MR_OUT return FALSE; } // else printf("BBBBBBBBBBBBBBBBBB\n"); } // else printf("AAAAAAAAAAAAAAAAAAA\n"); #ifdef MR_COUNT_OPS fpa+=1; #endif copy(mr_mip->w15,w->a); modadd(mr_mip->w15->w,mr_mip->w15->w,mr_mip->w15->w); nres_moddiv(_MIPP_ w->b,mr_mip->w15,w->b); MR_OUT return 
TRUE; } /* BOOL zzn2_multi_inverse(_MIPD_ int m,zzn2 *x,zzn2 *w) { int i; zzn2 t1,t2; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (m==0) return TRUE; if (m<0) return FALSE; MR_IN(214) if (x==w) { mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS); MR_OUT return FALSE; } if (m==1) { zzn2_copy_i(&x[0],&w[0]); zzn2_inv_i(_MIPP_ &w[0]); MR_OUT return TRUE; } zzn2_from_int(_MIPP_ 1,&w[0]); zzn2_copy_i(&x[0],&w[1]); for (i=2;i<m;i++) { if (zzn2_isunity(_MIPP_ &x[i-1])) zzn2_copy_i(&w[i-1],&w[i]); else zzn2_mul_i(&w[i-1],&x[i-1],&w[i]); } t1.a=mr_mip->w8; t1.b=mr_mip->w9; t2.a=mr_mip->w10; t2.b=mr_mip->w11; zzn2_mul_i(&w[m-1],&x[m-1],&t1); if (zzn2_iszero(&t1)) { mr_berror(_MIPP_ MR_ERR_DIV_BY_ZERO); MR_OUT return FALSE; } zzn2_inv_i(_MIPP_ &t1); zzn2_copy_i(&x[m-1],&t2); zzn2_mul_i(&w[m-1],&t1,&w[m-1]); for (i=m-2;;i--) { if (i==0) { zzn2_mul_i(&t2,&t1,&w[0]); break; } zzn2_mul_i(&w[i],&t2,&w[i]); zzn2_mul_i(&w[i],&t1,&w[i]); if (!zzn2_isunity(_MIPP_ &x[i])) zzn2_mul_i(&t2,&x[i],&t2); } MR_OUT return TRUE; } */ BOOL ecn2_iszero(ecn2 *a) { if (a->marker==MR_EPOINT_INFINITY) return TRUE; return FALSE; } void ecn2_copy(ecn2 *a,ecn2 *b) { zzn2_copy_i(&(a->x),&(b->x)); zzn2_copy_i(&(a->y),&(b->y)); #ifndef MR_AFFINE_ONLY if (a->marker==MR_EPOINT_GENERAL) zzn2_copy_i(&(a->z),&(b->z)); #endif b->marker=a->marker; } void ecn2_zero(ecn2 *a) { zzn2_zero(&(a->x)); zzn2_zero(&(a->y)); #ifndef MR_AFFINE_ONLY if (a->marker==MR_EPOINT_GENERAL) zzn2_zero(&(a->z)); #endif a->marker=MR_EPOINT_INFINITY; } BOOL ecn2_compare(_MIPD_ ecn2 *a,ecn2 *b) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return FALSE; MR_IN(193) ecn2_norm(_MIPP_ a); ecn2_norm(_MIPP_ b); MR_OUT if (zzn2_compare(&(a->x),&(b->x)) && zzn2_compare(&(a->y),&(b->y)) && a->marker==b->marker) return TRUE; return FALSE; } void ecn2_norm(_MIPD_ ecn2 *a) { zzn2 t; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif #ifndef MR_AFFINE_ONLY if (mr_mip->ERNUM) return; if (a->marker!=MR_EPOINT_GENERAL) 
return; MR_IN(194) zzn2_inv_i(_MIPP_ &(a->z)); t.a=mr_mip->w3; t.b=mr_mip->w4; zzn2_copy_i(&(a->z),&t); zzn2_sqr_i( &(a->z),&(a->z)); zzn2_mul_i( &(a->x),&(a->z),&(a->x)); zzn2_mul_i( &(a->z),&t,&(a->z)); zzn2_mul_i( &(a->y),&(a->z),&(a->y)); zzn2_from_int(_MIPP_ 1,&(a->z)); a->marker=MR_EPOINT_NORMALIZED; MR_OUT #endif } void ecn2_get(_MIPD_ ecn2 *e,zzn2 *x,zzn2 *y,zzn2 *z) { zzn2_copy_i(&(e->x),x); zzn2_copy_i(&(e->y),y); #ifndef MR_AFFINE_ONLY if (e->marker==MR_EPOINT_GENERAL) zzn2_copy_i(&(e->z),z); else zzn2_from_zzn(mr_mip->one,z); #endif } void ecn2_getxy(ecn2 *e,zzn2 *x,zzn2 *y) { zzn2_copy_i(&(e->x),x); zzn2_copy_i(&(e->y),y); } void ecn2_getx(ecn2 *e,zzn2 *x) { zzn2_copy_i(&(e->x),x); } inline void zzn2_conj_i(zzn2 *x,zzn2 *w) { zzn2_copy_i(x,w); modneg(w->b->w); } void ecn2_psi(_MIPD_ zzn2 *psi,ecn2 *P) { ecn2_norm(_MIPP_ P); zzn2_conj_i(&(P->x),&(P->x)); zzn2_conj_i(&(P->y),&(P->y)); zzn2_mul_i(&(P->x),&psi[0],&(P->x)); zzn2_mul_i(&(P->y),&psi[1],&(P->y)); } #ifndef MR_AFFINE_ONLY void ecn2_getz(_MIPD_ ecn2 *e,zzn2 *z) { if (e->marker==MR_EPOINT_GENERAL) zzn2_copy_i(&(e->z),z); else zzn2_from_zzn(mr_mip->one,z); } #endif void ecn2_rhs(_MIPD_ zzn2 *x,zzn2 *rhs) { /* calculate RHS of elliptic curve equation */ BOOL twist; zzn2 A,B; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; twist=mr_mip->TWIST; MR_IN(202) A.a=mr_mip->w10; A.b=mr_mip->w11; B.a=mr_mip->w12; B.b=mr_mip->w13; if (mr_abs(mr_mip->Asize)<MR_TOOBIG) zzn2_from_int(_MIPP_ mr_mip->Asize,&A); else zzn2_from_zzn(mr_mip->A,&A); if (mr_abs(mr_mip->Bsize)<MR_TOOBIG) zzn2_from_int(_MIPP_ mr_mip->Bsize,&B); else zzn2_from_zzn(mr_mip->B,&B); if (twist) { if (mr_mip->Asize==0 || mr_mip->Bsize==0) { if (mr_mip->Asize==0) { zzn2_txd(_MIPP_ &B); } if (mr_mip->Bsize==0) { zzn2_mul_i( &A,x,&B); zzn2_txd(_MIPP_ &B); } zzn2_negate(_MIPP_ &B,&B); } else { zzn2_txx_i(&B); zzn2_txx_i(&B); zzn2_txx_i(&B); zzn2_mul_i( &A,x,&A); zzn2_txx_i(&A); zzn2_txx_i(&A); zzn2_add_i(&B,&A,&B); } 
} else { zzn2_mul_i( &A,x,&A); zzn2_add_i(&B,&A,&B); } zzn2_sqr_i( x,&A); zzn2_mul_i( &A,x,&A); zzn2_add_i(&B,&A,rhs); MR_OUT } BOOL ecn2_set(_MIPD_ zzn2 *x,zzn2 *y,ecn2 *e) { zzn2 lhs,rhs; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return FALSE; MR_IN(195) lhs.a=mr_mip->w10; lhs.b=mr_mip->w11; rhs.a=mr_mip->w12; rhs.b=mr_mip->w13; ecn2_rhs(_MIPP_ x,&rhs); zzn2_sqr_i( y,&lhs); if (!zzn2_compare(&lhs,&rhs)) { MR_OUT return FALSE; } zzn2_copy_i(x,&(e->x)); zzn2_copy_i(y,&(e->y)); e->marker=MR_EPOINT_NORMALIZED; MR_OUT return TRUE; } #ifndef MR_NOSUPPORT_COMPRESSION BOOL ecn2_setx(_MIPD_ zzn2 *x,ecn2 *e) { zzn2 rhs; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return FALSE; MR_IN(201) rhs.a=mr_mip->w12; rhs.b=mr_mip->w13; ecn2_rhs(_MIPP_ x,&rhs); if (!zzn2_iszero(&rhs)) { if (!zzn2_sqrt(_MIPP_ &rhs,&rhs)) { MR_OUT return FALSE; } } zzn2_copy_i(x,&(e->x)); zzn2_copy_i(&rhs,&(e->y)); e->marker=MR_EPOINT_NORMALIZED; MR_OUT return TRUE; } #endif #ifndef MR_AFFINE_ONLY void ecn2_setxyz(zzn2 *x,zzn2 *y,zzn2 *z,ecn2 *e) { zzn2_copy_i(x,&(e->x)); zzn2_copy_i(y,&(e->y)); zzn2_copy_i(z,&(e->z)); e->marker=MR_EPOINT_GENERAL; } #endif void ecn2_negate(_MIPD_ ecn2 *u,ecn2 *w) { ecn2_copy(u,w); if (!w->marker!=MR_EPOINT_INFINITY) zzn2_negate(_MIPP_ &(w->y),&(w->y)); } /* BOOL ecn2_add2(_MIPD_ ecn2 *Q,ecn2 *P,zzn2 *lam,zzn2 *ex1) { BOOL Doubling; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif Doubling=ecn2_add3(_MIPP_ Q,P,lam,ex1,NULL); return Doubling; } BOOL ecn2_add1(_MIPD_ ecn2 *Q,ecn2 *P,zzn2 *lam) { BOOL Doubling; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif Doubling=ecn2_add3(_MIPP_ Q,P,lam,NULL,NULL); return Doubling; } */ BOOL ecn2_sub(_MIPD_ ecn2 *Q,ecn2 *P) { BOOL Doubling; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif ecn2_negate(_MIPP_ Q,Q); Doubling=ecn2_add(_MIPP_ Q,P); ecn2_negate(_MIPP_ Q,Q); return Doubling; } /* static void zzn2_print(_MIPD_ char *label, zzn2 *x) { char s1[1024], 
s2[1024]; big a, b; #ifdef MR_STATIC char mem_big[MR_BIG_RESERVE(2)]; memset(mem_big, 0, MR_BIG_RESERVE(2)); a=mirvar_mem(_MIPP_ mem_big,0); b=mirvar_mem(_MIPP_ mem_big,1); #else a = mirvar(_MIPP_ 0); b = mirvar(_MIPP_ 0); #endif redc(_MIPP_ x->a, a); otstr(_MIPP_ a, s1); redc(_MIPP_ x->b, b); otstr(_MIPP_ b, s2); printf("%s: [%s,%s]\n", label, s1, s2); #ifndef MR_STATIC mr_free(a); mr_free(b); #endif } static void nres_print(_MIPD_ char *label, big x) { char s[1024]; big a; a = mirvar(_MIPP_ 0); redc(_MIPP_ x, a); otstr(_MIPP_ a, s); printf("%s: %s\n", label, s); mr_free(a); } */ BOOL ecn2_add_sub(_MIPD_ ecn2 *P,ecn2 *Q,ecn2 *PP,ecn2 *PM) { /* PP=P+Q, PM=P-Q. Assumes P and Q are both normalized, and P!=Q */ #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zzn2 t1,t2,lam; if (mr_mip->ERNUM) return FALSE; MR_IN(211) if (P->marker==MR_EPOINT_GENERAL || P->marker==MR_EPOINT_GENERAL) { /* Sorry, some restrictions.. */ mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS); MR_OUT return FALSE; } if (zzn2_compare(&(P->x),&(Q->x))) { /* P=Q or P=-Q - shouldn't happen */ ecn2_copy(P,PP); ecn2_add(_MIPP_ Q,PP); ecn2_copy(P,PM); ecn2_sub(_MIPP_ Q,PM); MR_OUT return TRUE; } t1.a = mr_mip->w8; t1.b = mr_mip->w9; t2.a = mr_mip->w10; t2.b = mr_mip->w11; lam.a = mr_mip->w12; lam.b = mr_mip->w13; zzn2_copy_i(&(P->x),&t2); zzn2_sub_i(&t2,&(Q->x),&t2); zzn2_inv_i(_MIPP_ &t2); /* only one inverse required */ zzn2_add_i(&(P->x),&(Q->x),&(PP->x)); zzn2_copy_i(&(PP->x),&(PM->x)); zzn2_copy_i(&(P->y),&t1); zzn2_sub_i(&t1,&(Q->y),&t1); zzn2_copy_i(&t1,&lam); zzn2_mul_i( &lam,&t2,&lam); zzn2_copy_i(&lam,&t1); zzn2_sqr_i( &t1,&t1); zzn2_sub_i(&t1,&(PP->x),&(PP->x)); zzn2_copy_i(&(Q->x),&(PP->y)); zzn2_sub_i(&(PP->y),&(PP->x),&(PP->y)); zzn2_mul_i( &(PP->y),&lam,&(PP->y)); zzn2_sub_i(&(PP->y),&(Q->y),&(PP->y)); zzn2_copy_i(&(P->y),&t1); zzn2_add_i(&t1,&(Q->y),&t1); zzn2_copy_i(&t1,&lam); zzn2_mul_i( &lam,&t2,&lam); zzn2_copy_i(&lam,&t1); zzn2_sqr_i( &t1,&t1); zzn2_sub_i(&t1,&(PM->x),&(PM->x)); 
zzn2_copy_i(&(Q->x),&(PM->y)); zzn2_sub_i(&(PM->y),&(PM->x),&(PM->y)); zzn2_mul_i( &(PM->y),&lam,&(PM->y)); zzn2_add_i(&(PM->y),&(Q->y),&(PM->y)); PP->marker=MR_EPOINT_NORMALIZED; PM->marker=MR_EPOINT_NORMALIZED; MR_OUT return TRUE; } BOOL ecn2_add(_MIPD_ ecn2 *Q,ecn2 *P) { /* P+=Q */ BOOL Doubling=FALSE; BOOL twist; int iA; zzn2 t1,t2,t3,lam; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif t1.a = mr_mip->w8; t1.b = mr_mip->w9; t2.a = mr_mip->w10; t2.b = mr_mip->w11; t3.a = mr_mip->w12; t3.b = mr_mip->w13; lam.a = mr_mip->w14; lam.b = mr_mip->w15; twist=mr_mip->TWIST; if (mr_mip->ERNUM) return FALSE; if (P->marker==MR_EPOINT_INFINITY) { ecn2_copy(Q,P); return Doubling; } if (Q->marker==MR_EPOINT_INFINITY) return Doubling; MR_IN(205) if (Q!=P && Q->marker==MR_EPOINT_GENERAL) { /* Sorry, this code is optimized for mixed addition only */ mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS); MR_OUT return Doubling; } #ifndef MR_AFFINE_ONLY if (mr_mip->coord==MR_AFFINE) { #endif if (!zzn2_compare(&(P->x),&(Q->x))) { zzn2_copy_i(&(P->y),&t1); zzn2_sub_i(&t1,&(Q->y),&t1); zzn2_copy_i(&(P->x),&t2); zzn2_sub_i(&t2,&(Q->x),&t2); zzn2_copy_i(&t1,&lam); zzn2_inv_i(_MIPP_ &t2); zzn2_mul_i( &lam,&t2,&lam); zzn2_add_i(&(P->x),&(Q->x),&(P->x)); zzn2_copy_i(&lam,&t1); zzn2_sqr_i( &t1,&t1); zzn2_sub_i(&t1,&(P->x),&(P->x)); zzn2_copy_i(&(Q->x),&(P->y)); zzn2_sub_i(&(P->y),&(P->x),&(P->y)); zzn2_mul_i( &(P->y),&lam,&(P->y)); zzn2_sub_i(&(P->y),&(Q->y),&(P->y)); } else { if (!zzn2_compare(&(P->y),&(Q->y)) || zzn2_iszero(&(P->y))) { ecn2_zero(P); zzn2_from_int(_MIPP_ 1,&lam); MR_OUT return Doubling; } zzn2_copy_i(&(P->x),&t1); zzn2_copy_i(&(P->x),&t2); zzn2_copy_i(&(P->x),&lam); zzn2_sqr_i( &lam,&lam); zzn2_copy_i(&lam,&t3); zzn2_tim2_i(&t3); zzn2_add_i(&lam,&t3,&lam); if (mr_abs(mr_mip->Asize)<MR_TOOBIG) zzn2_from_int(_MIPP_ mr_mip->Asize,&t3); else zzn2_from_zzn(mr_mip->A,&t3); if (twist) { zzn2_txx_i(&t3); zzn2_txx_i(&t3); } zzn2_add_i(&lam,&t3,&lam); zzn2_copy_i(&(P->y),&t3); 
zzn2_tim2_i(&t3); zzn2_inv_i(_MIPP_ &t3); zzn2_mul_i( &lam,&t3,&lam); zzn2_add_i(&t2,&(P->x),&t2); zzn2_copy_i(&lam,&(P->x)); zzn2_sqr_i( &(P->x),&(P->x)); zzn2_sub_i(&(P->x),&t2,&(P->x)); zzn2_sub_i(&t1,&(P->x),&t1); zzn2_mul_i( &t1,&lam,&t1); zzn2_sub_i(&t1,&(P->y),&(P->y)); } #ifndef MR_AFFINE_ONLY zzn2_from_int(_MIPP_ 1,&(P->z)); #endif P->marker=MR_EPOINT_NORMALIZED; MR_OUT return Doubling; #ifndef MR_AFFINE_ONLY } if (Q==P) Doubling=TRUE; if (!Doubling) { if (P->marker!=MR_EPOINT_NORMALIZED) { zzn2_sqr_i(&(P->z),&t1); zzn2_mul_i(&t1,&(P->z),&t2); zzn2_mul_i(&t1,&(Q->x),&t1); zzn2_mul_i(&t2,&(Q->y),&t2); // zzn2_sqr_i( &(P->z),&t1); /* 1S */ // zzn2_mul_i( &t3,&t1,&t3); /* 1M */ // zzn2_mul_i( &t1,&(P->z),&t1); /* 1M */ // zzn2_mul_i( &Yzzz,&t1,&Yzzz); /* 1M */ } else { zzn2_copy(&(Q->x),&t1); zzn2_copy(&(Q->y),&t2); } if (zzn2_compare(&t1,&(P->x))) /*?*/ { if (!zzn2_compare(&t2,&(P->y)) || zzn2_iszero(&(P->y))) { ecn2_zero(P); zzn2_from_int(_MIPP_ 1,&lam); MR_OUT return Doubling; } else Doubling=TRUE; } } if (!Doubling) { /* Addition */ zzn2_sub_i(&t1,&(P->x),&t1); zzn2_sub_i(&t2,&(P->y),&t2); if (P->marker==MR_EPOINT_NORMALIZED) zzn2_copy_i(&t1,&(P->z)); else zzn2_mul_i(&(P->z),&t1,&(P->z)); zzn2_sqr_i(&t1,&t3); zzn2_mul_i(&t3,&t1,&lam); zzn2_mul_i(&t3,&(P->x),&t3); zzn2_copy_i(&t3,&t1); zzn2_tim2_i(&t1); zzn2_sqr_i(&t2,&(P->x)); zzn2_dblsub_i(&t1,&lam,&(P->x)); zzn2_sub_i(&t3,&(P->x),&t3); zzn2_mul_i(&t3,&t2,&t3); zzn2_mul_i(&lam,&(P->y),&lam); zzn2_sub_i(&t3,&lam,&(P->y)); } else { /* doubling */ if (P->marker==MR_EPOINT_NORMALIZED) zzn2_from_int(_MIPP_ 1,&t1); else zzn2_sqr_i(&(P->z),&t1); if (twist) zzn2_txx_i(&t1); zzn2_sub_i(&(P->x),&t1,&t2); zzn2_add_i(&t1,&(P->x),&t1); zzn2_mul_i(&t2,&t1,&t2); zzn2_tim3_i(&t2); zzn2_tim2_i(&(P->y)); if (P->marker==MR_EPOINT_NORMALIZED) zzn2_copy_i(&(P->y),&(P->z)); else zzn2_mul_i(&(P->z),&(P->y),&(P->z)); zzn2_sqr_i(&(P->y),&(P->y)); zzn2_mul_i(&(P->y),&(P->x),&t3); zzn2_sqr_i(&(P->y),&(P->y)); zzn2_div2_i(&(P->y)); 
zzn2_sqr_i(&t2,&(P->x)); zzn2_copy_i(&t3,&t1); zzn2_tim2_i(&t1); zzn2_sub_i(&(P->x),&t1,&(P->x)); zzn2_sub_i(&t3,&(P->x),&t1); zzn2_mul_i(&t1,&t2,&t1); zzn2_sub_i(&t1,&(P->y),&(P->y)); } P->marker=MR_EPOINT_GENERAL; MR_OUT return Doubling; #endif } static int calc_n(int w) { /* number of precomputed values needed for given window size */ if (w==3) return 3; if (w==4) return 5; if (w==5) return 11; if (w==6) return 41; return 0; } /* Dahmen, Okeya and Schepers "Affine Precomputation with Sole Inversion in Elliptic Curve Cryptography" */ /* Precomputes table into T. Assumes first P has been copied to P[0], then calculates 3P, 5P, 7P etc. into T */ #define MR_DOS_2 (14+4*MR_STR_SZ_2P) static void ecn2_dos(_MIPD_ int win,ecn2 *PT) { BOOL twist; int i,j,sz; zzn2 A,B,C,D,E,T,W,d[MR_STR_SZ_2P],e[MR_STR_SZ_2P]; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif #ifndef MR_STATIC char *mem = memalloc(_MIPP_ MR_DOS_2); #else char mem[MR_BIG_RESERVE(MR_DOS_2)]; memset(mem, 0, MR_BIG_RESERVE(MR_DOS_2)); #endif twist=mr_mip->TWIST; j=0; sz=calc_n(win); A.a= mirvar_mem(_MIPP_ mem, j++); A.b= mirvar_mem(_MIPP_ mem, j++); B.a= mirvar_mem(_MIPP_ mem, j++); B.b= mirvar_mem(_MIPP_ mem, j++); C.a= mirvar_mem(_MIPP_ mem, j++); C.b= mirvar_mem(_MIPP_ mem, j++); D.a= mirvar_mem(_MIPP_ mem, j++); D.b= mirvar_mem(_MIPP_ mem, j++); E.a= mirvar_mem(_MIPP_ mem, j++); E.b= mirvar_mem(_MIPP_ mem, j++); T.a= mirvar_mem(_MIPP_ mem, j++); T.b= mirvar_mem(_MIPP_ mem, j++); W.a= mirvar_mem(_MIPP_ mem, j++); W.b= mirvar_mem(_MIPP_ mem, j++); for (i=0;i<sz;i++) { d[i].a= mirvar_mem(_MIPP_ mem, j++); d[i].b= mirvar_mem(_MIPP_ mem, j++); e[i].a= mirvar_mem(_MIPP_ mem, j++); e[i].b= mirvar_mem(_MIPP_ mem, j++); } zzn2_add_i(&(PT[0].y),&(PT[0].y),&d[0]); /* 1. d_0=2.y */ zzn2_sqr_i(&d[0],&C); /* 2. 
C=d_0^2 */ zzn2_sqr_i(&(PT[0].x),&T); zzn2_add_i(&T,&T,&A); zzn2_add_i(&T,&A,&T); if (mr_abs(mr_mip->Asize)<MR_TOOBIG) zzn2_from_int(_MIPP_ mr_mip->Asize,&A); else zzn2_from_zzn(mr_mip->A,&A); if (twist) { zzn2_txx_i(&A); zzn2_txx_i(&A); } zzn2_add_i(&A,&T,&A); /* 3. A=3x^2+a */ zzn2_copy_i(&A,&W); zzn2_add_i(&C,&C,&B); zzn2_add_i(&B,&C,&B); zzn2_mul_i(&B,&(PT[0].x),&B); /* 4. B=3C.x */ zzn2_sqr_i(&A,&d[1]); zzn2_sub_i(&d[1],&B,&d[1]); /* 5. d_1=A^2-B */ zzn2_sqr_i(&d[1],&E); /* 6. E=d_1^2 */ zzn2_mul_i(&B,&E,&B); /* 7. B=E.B */ zzn2_sqr_i(&C,&C); /* 8. C=C^2 */ zzn2_mul_i(&E,&d[1],&D); /* 9. D=E.d_1 */ zzn2_mul_i(&A,&d[1],&A); zzn2_add_i(&A,&C,&A); zzn2_negate(_MIPP_ &A,&A); /* 10. A=-d_1*A-C */ zzn2_add_i(&D,&D,&T); zzn2_sqr_i(&A,&d[2]); zzn2_sub_i(&d[2],&T,&d[2]); zzn2_sub_i(&d[2],&B,&d[2]); /* 11. d_2=A^2-2D-B */ if (sz>3) { zzn2_sqr_i(&d[2],&E); /* 12. E=d_2^2 */ zzn2_add_i(&T,&D,&T); zzn2_add_i(&T,&B,&T); zzn2_mul_i(&T,&E,&B); /* 13. B=E(B+3D) */ zzn2_add_i(&A,&A,&T); zzn2_add_i(&C,&T,&C); zzn2_mul_i(&C,&D,&C); /* 14. C=D(2A+C) */ zzn2_mul_i(&d[2],&E,&D); /* 15. D=E.d_2 */ zzn2_mul_i(&A,&d[2],&A); zzn2_add_i(&A,&C,&A); zzn2_negate(_MIPP_ &A,&A); /* 16. A=-d_2*A-C */ zzn2_sqr_i(&A,&d[3]); zzn2_sub_i(&d[3],&D,&d[3]); zzn2_sub_i(&d[3],&B,&d[3]); /* 17. d_3=A^2-D-B */ for (i=4;i<sz;i++) { zzn2_sqr_i(&d[i-1],&E); /* 19. E=d(i-1)^2 */ zzn2_mul_i(&B,&E,&B); /* 20. B=E.B */ zzn2_mul_i(&C,&D,&C); /* 21. C=D.C */ zzn2_mul_i(&E,&d[i-1],&D); /* 22. D=E.d(i-1) */ zzn2_mul_i(&A,&d[i-1],&A); zzn2_add_i(&A,&C,&A); zzn2_negate(_MIPP_ &A,&A); /* 23. A=-d(i-1)*A-C */ zzn2_sqr_i(&A,&d[i]); zzn2_sub_i(&d[i],&D,&d[i]); zzn2_sub_i(&d[i],&B,&d[i]); /* 24. 
d(i)=A^2-D-B */ } } zzn2_copy_i(&d[0],&e[0]); for (i=1;i<sz;i++) zzn2_mul_i(&e[i-1],&d[i],&e[i]); zzn2_copy_i(&e[sz-1],&A); zzn2_inv_i(_MIPP_ &A); for (i=sz-1;i>0;i--) { zzn2_copy_i(&d[i],&B); zzn2_mul_i(&e[i-1],&A,&d[i]); zzn2_mul_i(&A,&B,&A); } zzn2_copy_i(&A,&d[0]); for (i=1;i<sz;i++) { zzn2_sqr_i(&e[i-1],&T); zzn2_mul_i(&d[i],&T,&d[i]); /** */ } zzn2_mul_i(&W,&d[0],&W); zzn2_sqr_i(&W,&A); zzn2_sub_i(&A,&(PT[0].x),&A); zzn2_sub_i(&A,&(PT[0].x),&A); zzn2_sub_i(&(PT[0].x),&A,&B); zzn2_mul_i(&B,&W,&B); zzn2_sub_i(&B,&(PT[0].y),&B); zzn2_sub_i(&B,&(PT[0].y),&T); zzn2_mul_i(&T,&d[1],&T); zzn2_sqr_i(&T,&(PT[1].x)); zzn2_sub_i(&(PT[1].x),&A,&(PT[1].x)); zzn2_sub_i(&(PT[1].x),&(PT[0].x),&(PT[1].x)); zzn2_sub_i(&A,&(PT[1].x),&(PT[1].y)); zzn2_mul_i(&(PT[1].y),&T,&(PT[1].y)); zzn2_sub_i(&(PT[1].y),&B,&(PT[1].y)); for (i=2;i<sz;i++) { zzn2_sub_i(&(PT[i-1].y),&B,&T); zzn2_mul_i(&T,&d[i],&T); zzn2_sqr_i(&T,&(PT[i].x)); zzn2_sub_i(&(PT[i].x),&A,&(PT[i].x)); zzn2_sub_i(&(PT[i].x),&(PT[i-1].x),&(PT[i].x)); zzn2_sub_i(&A,&(PT[i].x),&(PT[i].y)); zzn2_mul_i(&(PT[i].y),&T,&(PT[i].y)); zzn2_sub_i(&(PT[i].y),&B,&(PT[i].y)); } for (i=0;i<sz;i++) PT[i].marker=MR_EPOINT_NORMALIZED; #ifndef MR_STATIC memkill(_MIPP_ mem, MR_DOS_2); #else memset(mem, 0, MR_BIG_RESERVE(MR_DOS_2)); #endif } #ifndef MR_DOUBLE_BIG #define MR_MUL_RESERVE (1+4*MR_STR_SZ_2) #else #define MR_MUL_RESERVE (2+4*MR_STR_SZ_2) #endif int ecn2_mul(_MIPD_ big k,ecn2 *P) { int i,j,nb,n,nbs,nzs,nadds; big h; ecn2 T[MR_STR_SZ_2]; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif #ifndef MR_STATIC char *mem = memalloc(_MIPP_ MR_MUL_RESERVE); #else char mem[MR_BIG_RESERVE(MR_MUL_RESERVE)]; memset(mem, 0, MR_BIG_RESERVE(MR_MUL_RESERVE)); #endif j=0; #ifndef MR_DOUBLE_BIG h=mirvar_mem(_MIPP_ mem, j++); #else h=mirvar_mem(_MIPP_ mem, j); j+=2; #endif for (i=0;i<MR_STR_SZ_2;i++) { T[i].x.a= mirvar_mem(_MIPP_ mem, j++); T[i].x.b= mirvar_mem(_MIPP_ mem, j++); T[i].y.a= mirvar_mem(_MIPP_ mem, j++); T[i].y.b= mirvar_mem(_MIPP_ 
mem, j++); } MR_IN(207) ecn2_norm(_MIPP_ P); nadds=0; premult(_MIPP_ k,3,h); ecn2_copy(P,&T[0]); ecn2_dos(_MIPP_ MR_WIN_SZ_2,T); nb=logb2(_MIPP_ h); for (i=nb-2;i>=1;) { if (mr_mip->user!=NULL) (*mr_mip->user)(); n=mr_naf_window(_MIPP_ k,h,i,&nbs,&nzs,MR_WIN_SZ_2); for (j=0;j<nbs;j++) ecn2_add(_MIPP_ P,P); if (n>0) {nadds++; ecn2_add(_MIPP_ &T[n/2],P);} if (n<0) {nadds++; ecn2_sub(_MIPP_ &T[(-n)/2],P);} i-=nbs; if (nzs) { for (j=0;j<nzs;j++) ecn2_add(_MIPP_ P,P); i-=nzs; } } ecn2_norm(_MIPP_ P); MR_OUT #ifndef MR_STATIC memkill(_MIPP_ mem, MR_MUL_RESERVE); #else memset(mem, 0, MR_BIG_RESERVE(MR_MUL_RESERVE)); #endif return nadds; } /* Double addition, using Joint Sparse Form */ /* R=aP+bQ */ #define MR_MUL2_JSF_RESERVE 20 int ecn2_mul2_jsf(_MIPD_ big a,ecn2 *P,big b,ecn2 *Q,ecn2 *R) { int e1,h1,e2,h2,bb,nadds; ecn2 P1,P2,PS,PD; big c,d,e,f; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif #ifndef MR_STATIC char *mem = memalloc(_MIPP_ MR_MUL2_JSF_RESERVE); #else char mem[MR_BIG_RESERVE(MR_MUL2_JSF_RESERVE)]; memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_JSF_RESERVE)); #endif c = mirvar_mem(_MIPP_ mem, 0); d = mirvar_mem(_MIPP_ mem, 1); e = mirvar_mem(_MIPP_ mem, 2); f = mirvar_mem(_MIPP_ mem, 3); P1.x.a= mirvar_mem(_MIPP_ mem, 4); P1.x.b= mirvar_mem(_MIPP_ mem, 5); P1.y.a= mirvar_mem(_MIPP_ mem, 6); P1.y.b= mirvar_mem(_MIPP_ mem, 7); P2.x.a= mirvar_mem(_MIPP_ mem, 8); P2.x.b= mirvar_mem(_MIPP_ mem, 9); P2.y.a= mirvar_mem(_MIPP_ mem, 10); P2.y.b= mirvar_mem(_MIPP_ mem, 11); PS.x.a= mirvar_mem(_MIPP_ mem, 12); PS.x.b= mirvar_mem(_MIPP_ mem, 13); PS.y.a= mirvar_mem(_MIPP_ mem, 14); PS.y.b= mirvar_mem(_MIPP_ mem, 15); PD.x.a= mirvar_mem(_MIPP_ mem, 16); PD.x.b= mirvar_mem(_MIPP_ mem, 17); PD.y.a= mirvar_mem(_MIPP_ mem, 18); PD.y.b= mirvar_mem(_MIPP_ mem, 19); MR_IN(206) ecn2_norm(_MIPP_ Q); ecn2_copy(Q,&P2); copy(b,d); if (size(d)<0) { negify(d,d); ecn2_negate(_MIPP_ &P2,&P2); } ecn2_norm(_MIPP_ P); ecn2_copy(P,&P1); copy(a,c); if (size(c)<0) { negify(c,c); 
ecn2_negate(_MIPP_ &P1,&P1); } mr_jsf(_MIPP_ d,c,e,d,f,c); /* calculate joint sparse form */ if (mr_compare(e,f)>0) bb=logb2(_MIPP_ e)-1; else bb=logb2(_MIPP_ f)-1; ecn2_add_sub(_MIPP_ &P1,&P2,&PS,&PD); ecn2_zero(R); nadds=0; while (bb>=0) { /* add/subtract method */ if (mr_mip->user!=NULL) (*mr_mip->user)(); ecn2_add(_MIPP_ R,R); e1=h1=e2=h2=0; if (mr_testbit(_MIPP_ d,bb)) e2=1; if (mr_testbit(_MIPP_ e,bb)) h2=1; if (mr_testbit(_MIPP_ c,bb)) e1=1; if (mr_testbit(_MIPP_ f,bb)) h1=1; if (e1!=h1) { if (e2==h2) { if (h1==1) {ecn2_add(_MIPP_ &P1,R); nadds++;} else {ecn2_sub(_MIPP_ &P1,R); nadds++;} } else { if (h1==1) { if (h2==1) {ecn2_add(_MIPP_ &PS,R); nadds++;} else {ecn2_add(_MIPP_ &PD,R); nadds++;} } else { if (h2==1) {ecn2_sub(_MIPP_ &PD,R); nadds++;} else {ecn2_sub(_MIPP_ &PS,R); nadds++;} } } } else if (e2!=h2) { if (h2==1) {ecn2_add(_MIPP_ &P2,R); nadds++;} else {ecn2_sub(_MIPP_ &P2,R); nadds++;} } bb-=1; } ecn2_norm(_MIPP_ R); MR_OUT #ifndef MR_STATIC memkill(_MIPP_ mem, MR_MUL2_JSF_RESERVE); #else memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_JSF_RESERVE)); #endif return nadds; } /* General purpose multi-exponentiation engine, using inter-leaving algorithm. Calculate aP+bQ+cR+dS... Inputs are divided into two groups of sizes wa<4 and wb<4. For the first group if the points are fixed the first precomputed Table Ta[] may be taken from ROM. For the second group if the points are variable Tb[j] will have to computed online. Each group has its own window size, wina (=5?) and winb (=4?) respectively. The values a,b,c.. are provided in ma[] and mb[], and 3.a,3.b,3.c (as required by the NAF) are provided in ma3[] and mb3[]. If only one group is required, set wb=0 and pass NULL pointers. 
*/
/* Interleaved multi-scalar multiplication: R = sum ma[i].Ta-points + sum mb[j].Tb-points.
   Group "a": wa scalars, window wina, precomputed tables Ta (possibly from ROM).
   Group "b": wb scalars, window winb, tables Tb computed online.
   ma3[]/mb3[] hold 3x the scalars, as required by the NAF recoding.
   Returns the number of point additions performed. */
int ecn2_muln_engine(_MIPD_ int wa,int wina,int wb,int winb,big *ma,big *ma3,big *mb,big *mb3,ecn2 *Ta,ecn2 *Tb,ecn2 *R)
{ /* general purpose interleaving algorithm engine for multi-exp */
    int i,j,tba[4],pba[4],na[4],sa[4],tbb[4],pbb[4],nb[4],sb[4],nbits,nbs,nzs;
    int sza,szb,nadds;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    sza=calc_n(wina); /* per-point table size, group a */
    szb=calc_n(winb); /* per-point table size, group b */

    ecn2_zero(R);

    /* record the sign of each scalar and find the longest (via the 3x multiples) */
    nbits=0;
    for (i=0;i<wa;i++) {sa[i]=exsign(ma[i]); tba[i]=0; j=logb2(_MIPP_ ma3[i]); if (j>nbits) nbits=j; }
    for (i=0;i<wb;i++) {sb[i]=exsign(mb[i]); tbb[i]=0; j=logb2(_MIPP_ mb3[i]); if (j>nbits) nbits=j; }

    nadds=0;
    for (i=nbits-1;i>=1;i--)
    { /* one shared doubling per digit position, then per-scalar windowed adds */
        if (mr_mip->user!=NULL) (*mr_mip->user)();
        if (R->marker!=MR_EPOINT_INFINITY) ecn2_add(_MIPP_ R,R);
        for (j=0;j<wa;j++)
        { /* deal with the first group */
            if (tba[j]==0)
            { /* fetch the next NAF window for this scalar:
                 nbs digits in the window, then nzs zero digits */
                na[j]=mr_naf_window(_MIPP_ ma[j],ma3[j],i,&nbs,&nzs,wina);
                tba[j]=nbs+nzs; pba[j]=nbs;
            }
            tba[j]--;  pba[j]--;
            if (pba[j]==0)
            { /* window digit becomes active here: add/sub the table entry,
                 with direction flipped for a negative scalar */
                if (sa[j]==PLUS)
                {
                    if (na[j]>0) {ecn2_add(_MIPP_ &Ta[j*sza+na[j]/2],R); nadds++;}
                    if (na[j]<0) {ecn2_sub(_MIPP_ &Ta[j*sza+(-na[j])/2],R); nadds++;}
                }
                else
                {
                    if (na[j]>0) {ecn2_sub(_MIPP_ &Ta[j*sza+na[j]/2],R); nadds++;}
                    if (na[j]<0) {ecn2_add(_MIPP_ &Ta[j*sza+(-na[j])/2],R); nadds++;}
                }
            }
        }
        for (j=0;j<wb;j++)
        { /* deal with the second group */
            if (tbb[j]==0)
            {
                nb[j]=mr_naf_window(_MIPP_ mb[j],mb3[j],i,&nbs,&nzs,winb);
                tbb[j]=nbs+nzs; pbb[j]=nbs;
            }
            tbb[j]--;  pbb[j]--;
            if (pbb[j]==0)
            {
                if (sb[j]==PLUS)
                {
                    if (nb[j]>0) {ecn2_add(_MIPP_ &Tb[j*szb+nb[j]/2],R); nadds++;}
                    if (nb[j]<0) {ecn2_sub(_MIPP_ &Tb[j*szb+(-nb[j])/2],R); nadds++;}
                }
                else
                {
                    if (nb[j]>0) {ecn2_sub(_MIPP_ &Tb[j*szb+nb[j]/2],R); nadds++;}
                    if (nb[j]<0) {ecn2_add(_MIPP_ &Tb[j*szb+(-nb[j])/2],R); nadds++;}
                }
            }
        }
    }
    ecn2_norm(_MIPP_ R);
    return nadds;
}

/* Routines to support Galbraith, Lin, Scott (GLS) method for ECC */
/* requires an endomorphism psi */

/* *********************** */

/* Precompute T - first half from i.P, second half from i.psi(P) */

void ecn2_precomp_gls(_MIPD_ int win,ecn2
*P,zzn2 *psi,ecn2 *T)
{
    int i,j,sz;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    j=0; /* NOTE(review): j is set but otherwise unused here */
    sz=calc_n(win);

    MR_IN(219)

    ecn2_norm(_MIPP_ P);
    ecn2_copy(P,&T[0]);
    ecn2_dos(_MIPP_ win,T); /* precompute table */
    /* second half of T: apply the psi endomorphism to each first-half entry */
    for (i=sz;i<sz+sz;i++)
    {
        ecn2_copy(&T[i-sz],&T[i]);
        ecn2_psi(_MIPP_ psi,&T[i]);
    }

    MR_OUT
}

/* Calculate a[0].P+a[1].psi(P) using interleaving method */

#define MR_MUL2_GLS_RESERVE (2+2*MR_STR_SZ_2*4)

/* GLS double-scalar multiply R = a[0].P + a[1].psi(P).
   Precomputes the table online then delegates to ecn2_muln_engine
   (group "b" only).  Returns the number of point additions performed. */
int ecn2_mul2_gls(_MIPD_ big *a,ecn2 *P,zzn2 *psi,ecn2 *R)
{
    int i,j,nadds;
    ecn2 T[2*MR_STR_SZ_2];
    big a3[2];
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_STATIC
    char *mem = memalloc(_MIPP_ MR_MUL2_GLS_RESERVE);
#else
    char mem[MR_BIG_RESERVE(MR_MUL2_GLS_RESERVE)];
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_GLS_RESERVE));
#endif
    for (j=i=0;i<2;i++)
        a3[i]=mirvar_mem(_MIPP_ mem, j++);
    for (i=0;i<2*MR_STR_SZ_2;i++)
    { /* table entries draw from the same reserved memory block */
        T[i].x.a=mirvar_mem(_MIPP_ mem, j++);
        T[i].x.b=mirvar_mem(_MIPP_ mem, j++);
        T[i].y.a=mirvar_mem(_MIPP_ mem, j++);
        T[i].y.b=mirvar_mem(_MIPP_ mem, j++);
        T[i].marker=MR_EPOINT_INFINITY;
    }

    MR_IN(220)

    ecn2_precomp_gls(_MIPP_ MR_WIN_SZ_2,P,psi,T);

    for (i=0;i<2;i++) premult(_MIPP_ a[i],3,a3[i]); /* calculate for NAF */

    nadds=ecn2_muln_engine(_MIPP_ 0,0,2,MR_WIN_SZ_2,NULL,NULL,a,a3,NULL,T,R);

    ecn2_norm(_MIPP_ R);

    MR_OUT
#ifndef MR_STATIC
    memkill(_MIPP_ mem, MR_MUL2_GLS_RESERVE);
#else
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_GLS_RESERVE));
#endif
    return nadds;
}

/* Calculates a[0]*P+a[1]*psi(P) + b[0]*Q+b[1]*psi(Q)
   where P is fixed, and precomputations are already done off-line into FT
   using ecn2_precomp_gls. Useful for signature verification */

#define MR_MUL4_GLS_V_RESERVE (4+2*MR_STR_SZ_2*4)

/* Four-scalar GLS multiply; Q's table VT is built online, FT is the
   off-line precomputed table for the fixed point P.
   Returns the number of point additions performed. */
int ecn2_mul4_gls_v(_MIPD_ big *a,ecn2 *FT,big *b,ecn2 *Q,zzn2 *psi,ecn2 *R)
{
    int i,j,nadds;
    ecn2 VT[2*MR_STR_SZ_2];
    big a3[2],b3[2];
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_STATIC
    char *mem = memalloc(_MIPP_ MR_MUL4_GLS_V_RESERVE);
#else
    char mem[MR_BIG_RESERVE(MR_MUL4_GLS_V_RESERVE)];
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL4_GLS_V_RESERVE));
#endif
    j=0;
    for (i=0;i<2;i++)
    {
        a3[i]=mirvar_mem(_MIPP_ mem, j++);
        b3[i]=mirvar_mem(_MIPP_ mem, j++);
    }
    for (i=0;i<2*MR_STR_SZ_2;i++)
    {
        VT[i].x.a=mirvar_mem(_MIPP_ mem, j++);
        VT[i].x.b=mirvar_mem(_MIPP_ mem, j++);
        VT[i].y.a=mirvar_mem(_MIPP_ mem, j++);
        VT[i].y.b=mirvar_mem(_MIPP_ mem, j++);
        VT[i].marker=MR_EPOINT_INFINITY;
    }

    MR_IN(217)

    ecn2_precomp_gls(_MIPP_ MR_WIN_SZ_2,Q,psi,VT); /* precompute for the variable points */

    for (i=0;i<2;i++)
    { /* needed for NAF */
        premult(_MIPP_ a[i],3,a3[i]);
        premult(_MIPP_ b[i],3,b3[i]);
    }
    nadds=ecn2_muln_engine(_MIPP_ 2,MR_WIN_SZ_2P,2,MR_WIN_SZ_2,a,a3,b,b3,FT,VT,R);
    ecn2_norm(_MIPP_ R);

    MR_OUT
#ifndef MR_STATIC
    memkill(_MIPP_ mem, MR_MUL4_GLS_V_RESERVE);
#else
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL4_GLS_V_RESERVE));
#endif
    return nadds;
}

/* Calculate a.P+b.Q using interleaving method.
P is fixed and FT is precomputed from it */

/* Build the windowed table T[] from P (no endomorphism variant). */
void ecn2_precomp(_MIPD_ int win,ecn2 *P,ecn2 *T)
{
    int sz;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    sz=calc_n(win); /* NOTE(review): sz is computed but not used here */

    MR_IN(216)

    ecn2_norm(_MIPP_ P);
    ecn2_copy(P,&T[0]);
    ecn2_dos(_MIPP_ win,T);

    MR_OUT
}

#ifndef MR_DOUBLE_BIG
#define MR_MUL2_RESERVE (2+2*MR_STR_SZ_2*4)
#else
#define MR_MUL2_RESERVE (4+2*MR_STR_SZ_2*4)
#endif

/* R = a.FT-point + b.Q via the interleaving engine; FT is the precomputed
   table for the fixed point, Q's table is built online.
   Returns the number of point additions performed. */
int ecn2_mul2(_MIPD_ big a,ecn2 *FT,big b,ecn2 *Q,ecn2 *R)
{
    int i,j,nadds;
    ecn2 T[2*MR_STR_SZ_2];
    big a3,b3;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_STATIC
    char *mem = memalloc(_MIPP_ MR_MUL2_RESERVE);
#else
    char mem[MR_BIG_RESERVE(MR_MUL2_RESERVE)];
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_RESERVE));
#endif
    j=0;
#ifndef MR_DOUBLE_BIG
    a3=mirvar_mem(_MIPP_ mem, j++);
    b3=mirvar_mem(_MIPP_ mem, j++);
#else
    /* double-length bigs occupy two slots each */
    a3=mirvar_mem(_MIPP_ mem, j); j+=2;
    b3=mirvar_mem(_MIPP_ mem, j); j+=2;
#endif
    for (i=0;i<2*MR_STR_SZ_2;i++)
    {
        T[i].x.a=mirvar_mem(_MIPP_ mem, j++);
        T[i].x.b=mirvar_mem(_MIPP_ mem, j++);
        T[i].y.a=mirvar_mem(_MIPP_ mem, j++);
        T[i].y.b=mirvar_mem(_MIPP_ mem, j++);
        T[i].marker=MR_EPOINT_INFINITY;
    }

    MR_IN(218)

    ecn2_precomp(_MIPP_ MR_WIN_SZ_2,Q,T);

    premult(_MIPP_ a,3,a3); /* 3x multiples, needed for NAF recoding */
    premult(_MIPP_ b,3,b3);

    nadds=ecn2_muln_engine(_MIPP_ 1,MR_WIN_SZ_2P,1,MR_WIN_SZ_2,&a,&a3,&b,&b3,FT,T,R);

    ecn2_norm(_MIPP_ R);

    MR_OUT
#ifndef MR_STATIC
    memkill(_MIPP_ mem, MR_MUL2_RESERVE);
#else
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_RESERVE));
#endif
    return nadds;
}

#ifndef MR_STATIC

/* Build a "brick" (comb) precomputation table for fixed-base multiplication. */
BOOL ecn2_brick_init(_MIPD_ ebrick *B,zzn2 *x,zzn2 *y,big a,big b,big n,int window,int nb)
{ /* Uses Montgomery arithmetic internally            *
   * (x,y) is the fixed base                          *
   * a,b and n are parameters and modulus of the curve *
   * window is the window size in bits and            *
   * nb is the maximum number of bits in the multiplier */
    int i,j,k,t,bp,len,bptr;
    ecn2 *table;
    ecn2 w;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (nb<2 || window<1 || window>nb || mr_mip->ERNUM) return FALSE;
    t=MR_ROUNDUP(nb,window); /* digits per comb column */
    if (t<2) return FALSE;

    MR_IN(221)

#ifndef MR_ALWAYS_BINARY
    if (mr_mip->base != mr_mip->base2)
    { /* comb method needs a binary representation */
        mr_berror(_MIPP_ MR_ERR_NOT_SUPPORTED);
        MR_OUT
        return FALSE;
    }
#endif

    B->window=window;
    B->max=nb;
    table=mr_alloc(_MIPP_ (1<<window),sizeof(ecn2));
    if (table==NULL)
    {
        mr_berror(_MIPP_ MR_ERR_OUT_OF_MEMORY);
        MR_OUT
        return FALSE;
    }
    B->a=mirvar(_MIPP_ 0);
    B->b=mirvar(_MIPP_ 0);
    B->n=mirvar(_MIPP_ 0);
    copy(a,B->a);
    copy(b,B->b);
    copy(n,B->n);

    ecurve_init(_MIPP_ a,b,n,MR_AFFINE);
    mr_mip->TWIST=TRUE;

    w.x.a=mirvar(_MIPP_ 0);
    w.x.b=mirvar(_MIPP_ 0);
    w.y.a=mirvar(_MIPP_ 0);
    w.y.b=mirvar(_MIPP_ 0);
    w.marker=MR_EPOINT_INFINITY;
    ecn2_set(_MIPP_ x,y,&w);

    table[0].x.a=mirvar(_MIPP_ 0);
    table[0].x.b=mirvar(_MIPP_ 0);
    table[0].y.a=mirvar(_MIPP_ 0);
    table[0].y.b=mirvar(_MIPP_ 0);
    table[0].marker=MR_EPOINT_INFINITY;
    table[1].x.a=mirvar(_MIPP_ 0);
    table[1].x.b=mirvar(_MIPP_ 0);
    table[1].y.a=mirvar(_MIPP_ 0);
    table[1].y.b=mirvar(_MIPP_ 0);
    table[1].marker=MR_EPOINT_INFINITY;

    /* w repeatedly doubled t times gives the next power-of-two table entry */
    ecn2_copy(&w,&table[1]);
    for (j=0;j<t;j++)
        ecn2_add(_MIPP_ &w,&w);

    k=1;
    for (i=2;i<(1<<window);i++)
    { /* remaining entries: powers of two directly, others as sums of them */
        table[i].x.a=mirvar(_MIPP_ 0);
        table[i].x.b=mirvar(_MIPP_ 0);
        table[i].y.a=mirvar(_MIPP_ 0);
        table[i].y.b=mirvar(_MIPP_ 0);
        table[i].marker=MR_EPOINT_INFINITY;
        if (i==(1<<k))
        {
            k++;
            ecn2_copy(&w,&table[i]);
            for (j=0;j<t;j++)
                ecn2_add(_MIPP_ &w,&w);
            continue;
        }
        bp=1;
        for (j=0;j<k;j++)
        {
            if (i&bp)
                ecn2_add(_MIPP_ &table[1<<j],&table[i]);
            bp<<=1;
        }
    }
    mr_free(w.x.a);
    mr_free(w.x.b);
    mr_free(w.y.a);
    mr_free(w.y.b);

    /* create the table */
    /* flatten the points into a raw word array; each point contributes
       4 bigs of len words (x.a, x.b, y.a, y.b), freeing as we go.
       NOTE(review): this mr_alloc result is not checked for NULL. */
    len=n->len;
    bptr=0;
    B->table=mr_alloc(_MIPP_ 4*len*(1<<window),sizeof(mr_small));
    for (i=0;i<(1<<window);i++)
    {
        for (j=0;j<len;j++) B->table[bptr++]=table[i].x.a->w[j];
        for (j=0;j<len;j++) B->table[bptr++]=table[i].x.b->w[j];
        for (j=0;j<len;j++) B->table[bptr++]=table[i].y.a->w[j];
        for (j=0;j<len;j++) B->table[bptr++]=table[i].y.b->w[j];
        mr_free(table[i].x.a);
        mr_free(table[i].x.b);
        mr_free(table[i].y.a);
        mr_free(table[i].y.b);
    }

    mr_free(table);

    MR_OUT
    return TRUE;
}

/* Release everything allocated by ecn2_brick_init. */
void ecn2_brick_end(ebrick *B)
{
    mirkill(B->n);
    mirkill(B->b);
    mirkill(B->a);
    mr_free(B->table);
}

#else

/* use precomputated table in ROM */

/* Static build: the brick table lives in ROM; just record the pointers. */
void ecn2_brick_init(ebrick *B,const mr_small* rom,big a,big b,big n,int window,int nb)
{
    B->table=rom;
    B->a=a; /* just pass a pointer */
    B->b=b;
    B->n=n;
    B->window=window; /* 2^4=16 stored values */
    B->max=nb;
}

#endif

/*
void ecn2_mul_brick(_MIPD_ ebrick *B,big e,zzn2 *x,zzn2 *y)
{
    int i,j,t,len,maxsize,promptr;
    ecn2 w,z;
#ifdef MR_STATIC
    char mem[MR_BIG_RESERVE(10)];
#else
    char *mem;
#endif
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (size(e)<0) mr_berror(_MIPP_ MR_ERR_NEG_POWER);
    t=MR_ROUNDUP(B->max,B->window);

    MR_IN(116)

#ifndef MR_ALWAYS_BINARY
    if (mr_mip->base != mr_mip->base2)
    {
        mr_berror(_MIPP_ MR_ERR_NOT_SUPPORTED);
        MR_OUT
        return;
    }
#endif

    if (logb2(_MIPP_ e) > B->max)
    {
        mr_berror(_MIPP_ MR_ERR_EXP_TOO_BIG);
        MR_OUT
        return;
    }

    ecurve_init(_MIPP_ B->a,B->b,B->n,MR_BEST);
    mr_mip->TWIST=TRUE;

#ifdef MR_STATIC
    memset(mem,0,MR_BIG_RESERVE(10));
#else
    mem=memalloc(_MIPP_ 10);
#endif

    w.x.a=mirvar_mem(_MIPP_ mem, 0);
    w.x.b=mirvar_mem(_MIPP_ mem, 1);
    w.y.a=mirvar_mem(_MIPP_ mem, 2);
    w.y.b=mirvar_mem(_MIPP_ mem, 3);
    w.z.a=mirvar_mem(_MIPP_ mem, 4);
    w.z.b=mirvar_mem(_MIPP_ mem, 5);
    w.marker=MR_EPOINT_INFINITY;
    z.x.a=mirvar_mem(_MIPP_ mem, 6);
    z.x.b=mirvar_mem(_MIPP_ mem, 7);
    z.y.a=mirvar_mem(_MIPP_ mem, 8);
    z.y.b=mirvar_mem(_MIPP_ mem, 9);
    z.marker=MR_EPOINT_INFINITY;

    len=B->n->len;
    maxsize=4*(1<<B->window)*len;

    for (i=t-1;i>=0;i--)
    {
        j=recode(_MIPP_ e,t,B->window,i);
        ecn2_add(_MIPP_ &w,&w);
        if (j>0)
        {
            promptr=4*j*len;
            init_big_from_rom(z.x.a,len,B->table,maxsize,&promptr);
            init_big_from_rom(z.x.b,len,B->table,maxsize,&promptr);
            init_big_from_rom(z.y.a,len,B->table,maxsize,&promptr);
            init_big_from_rom(z.y.b,len,B->table,maxsize,&promptr);
            z.marker=MR_EPOINT_NORMALIZED;
            ecn2_add(_MIPP_ &z,&w);
        }
    }
    ecn2_norm(_MIPP_ &w);
    ecn2_getxy(&w,x,y);
#ifndef MR_STATIC
    memkill(_MIPP_ mem,10);
#else
    memset(mem,0,MR_BIG_RESERVE(10));
#endif
    MR_OUT
}
*/

/* Fixed-base comb multiplication with GLS endomorphism:
   (x,y) = e[0].G + e[1].psi(G), where G and its comb table live in B.
   Table entries are streamed out of ROM into z via init_big_from_rom. */
void
ecn2_mul_brick_gls(_MIPD_ ebrick *B,big *e,zzn2 *psi,zzn2 *x,zzn2 *y)
{
    int i,j,k,t,len,maxsize,promptr,se[2];
    ecn2 w,z;
#ifdef MR_STATIC
    char mem[MR_BIG_RESERVE(10)];
#else
    char *mem;
#endif
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    for (k=0;k<2;k++) se[k]=exsign(e[k]); /* remember scalar signs */

    t=MR_ROUNDUP(B->max,B->window);

    MR_IN(222)

#ifndef MR_ALWAYS_BINARY
    if (mr_mip->base != mr_mip->base2)
    {
        mr_berror(_MIPP_ MR_ERR_NOT_SUPPORTED);
        MR_OUT
        return;
    }
#endif

    if (logb2(_MIPP_ e[0])>B->max || logb2(_MIPP_ e[1])>B->max)
    {
        mr_berror(_MIPP_ MR_ERR_EXP_TOO_BIG);
        MR_OUT
        return;
    }

    ecurve_init(_MIPP_ B->a,B->b,B->n,MR_BEST);
    mr_mip->TWIST=TRUE;

#ifdef MR_STATIC
    memset(mem,0,MR_BIG_RESERVE(10));
#else
    mem=memalloc(_MIPP_ 10);
#endif

    z.x.a=mirvar_mem(_MIPP_ mem, 0);
    z.x.b=mirvar_mem(_MIPP_ mem, 1);
    z.y.a=mirvar_mem(_MIPP_ mem, 2);
    z.y.b=mirvar_mem(_MIPP_ mem, 3);
    z.marker=MR_EPOINT_INFINITY;
    w.x.a=mirvar_mem(_MIPP_ mem, 4);
    w.x.b=mirvar_mem(_MIPP_ mem, 5);
    w.y.a=mirvar_mem(_MIPP_ mem, 6);
    w.y.b=mirvar_mem(_MIPP_ mem, 7);
#ifndef MR_AFFINE_ONLY
    w.z.a=mirvar_mem(_MIPP_ mem, 8);
    w.z.b=mirvar_mem(_MIPP_ mem, 9);
#endif
    w.marker=MR_EPOINT_INFINITY;

    len=B->n->len;
    maxsize=4*(1<<B->window)*len; /* total words in the ROM table */

    for (i=t-1;i>=0;i--)
    { /* one doubling per comb column, then up to two table additions */
        ecn2_add(_MIPP_ &w,&w);
        for (k=0;k<2;k++)
        {
            j=recode(_MIPP_ e[k],t,B->window,i);
            if (j>0)
            {
                promptr=4*j*len;
                init_big_from_rom(z.x.a,len,B->table,maxsize,&promptr);
                init_big_from_rom(z.x.b,len,B->table,maxsize,&promptr);
                init_big_from_rom(z.y.a,len,B->table,maxsize,&promptr);
                init_big_from_rom(z.y.b,len,B->table,maxsize,&promptr);
                z.marker=MR_EPOINT_NORMALIZED;
                if (k==1) ecn2_psi(_MIPP_ psi,&z); /* apply endomorphism for e[1] */
                if (se[k]==PLUS) ecn2_add(_MIPP_ &z,&w);
                else             ecn2_sub(_MIPP_ &z,&w);
            }
        }
    }
    ecn2_norm(_MIPP_ &w);
    ecn2_getxy(&w,x,y);
#ifndef MR_STATIC
    memkill(_MIPP_ mem,10);
#else
    memset(mem,0,MR_BIG_RESERVE(10));
#endif
    MR_OUT
}
omp-parallel-nested-taskloop.c
#include <omp.h>
#include <unistd.h>
#include <stdio.h>

#define THREADS 2
#define LOOPS 3

/* Open an inner (nested) parallel region of four threads; every thread in it
 * creates one explicit task that reports where it runs.  'region' identifies
 * the taskloop iteration that made this call, 'spawner' the thread id that
 * encountered it in the outer region. */
void work(int region, int spawner)
{
  #pragma omp parallel num_threads(4)
  {
    #pragma omp task
    {
      printf("%d/%d in region %d at level %d (encountering thread is %d)\n",
             omp_get_thread_num(), omp_get_num_threads(), region,
             omp_get_level(), spawner);
    }
    /* give the region's threads a moment so pending tasks get picked up */
    usleep(30);
  }
}

int main(void)
{
  int j = 0;

  /* permit the nested parallel region inside work() to be active too */
  omp_set_max_active_levels(2);

  #pragma omp parallel num_threads(THREADS)
  {
    /* a single thread generates the taskloop; grainsize(1) makes one task
     * per iteration, distributed over the outer region's threads */
    #pragma omp single
    #pragma omp taskloop grainsize(1)
    for (j = 0; j < LOOPS; j++)
      work(j, omp_get_thread_num());
  }

  return 0;
}
search.h
// -*- C++ -*- // Copyright (C) 2007-2014 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/search.h * @brief Parallel implementation base for std::search() and * std::search_n(). * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Felix Putze. #ifndef _GLIBCXX_PARALLEL_SEARCH_H #define _GLIBCXX_PARALLEL_SEARCH_H 1 #include <bits/stl_algobase.h> #include <parallel/parallel.h> #include <parallel/equally_split.h> namespace __gnu_parallel { /** * @brief Precalculate __advances for Knuth-Morris-Pratt algorithm. * @param __elements Begin iterator of sequence to search for. * @param __length Length of sequence to search for. * @param __off Returned __offsets. 
*/
  // Knuth-Morris-Pratt border table: __off[__j] records how far the search
  // may safely jump after matching __j pattern characters.  Note __off must
  // have room for __length + 1 entries.
  template<typename _RAIter, typename _DifferenceTp>
    void
    __calc_borders(_RAIter __elements, _DifferenceTp __length,
                   _DifferenceTp* __off)
    {
      typedef _DifferenceTp _DifferenceType;

      __off[0] = -1;
      if (__length > 1)
        __off[1] = 0;
      _DifferenceType __k = 0;
      for (_DifferenceType __j = 2; __j <= __length; __j++)
        {
          // fall back through previously computed borders until the next
          // pattern character extends one
          while ((__k >= 0) && !(__elements[__k] == __elements[__j-1]))
            __k = __off[__k];
          __off[__j] = ++__k;
        }
    }

  // Generic parallel find algorithm (requires random access iterator).

  /** @brief Parallel std::search.
   *  @param __begin1 Begin iterator of first sequence.
   *  @param __end1 End iterator of first sequence.
   *  @param __begin2 Begin iterator of second sequence.
   *  @param __end2 End iterator of second sequence.
   *  @param __pred Find predicate.
   *  @return Place of finding in first sequence. */
  template<typename __RAIter1, typename __RAIter2, typename _Pred>
    __RAIter1
    __search_template(__RAIter1 __begin1, __RAIter1 __end1,
                      __RAIter2 __begin2, __RAIter2 __end2,
                      _Pred __pred)
    {
      typedef std::iterator_traits<__RAIter1> _TraitsType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      _GLIBCXX_CALL((__end1 - __begin1) + (__end2 - __begin2));

      _DifferenceType __pattern_length = __end2 - __begin2;

      // Pattern too short.
      if(__pattern_length <= 0)
        return __end1;

      // Last point to start search.
      _DifferenceType __input_length = (__end1 - __begin1) - __pattern_length;

      // Where is first occurrence of pattern? defaults to end.
      _DifferenceType __result = (__end1 - __begin1);
      _DifferenceType *__splitters;

      // Pattern too long.
      if (__input_length < 0)
        return __end1;

      // __result is shared; the lock serializes candidate updates.
      omp_lock_t __result_lock;
      omp_init_lock(&__result_lock);

      _ThreadIndex __num_threads =
        std::max<_DifferenceType>
        (1, std::min<_DifferenceType>(__input_length,
                                      __get_max_threads()));

      // VLA of KMP jump distances, shared read-only by all threads.
      _DifferenceType __advances[__pattern_length];
      __calc_borders(__begin2, __pattern_length, __advances);

#     pragma omp parallel num_threads(__num_threads)
      {
#       pragma omp single
        {
          // Partition the candidate start positions equally among threads.
          __num_threads = omp_get_num_threads();
          __splitters = new _DifferenceType[__num_threads + 1];
          __equally_split(__input_length, __num_threads, __splitters);
        }

        _ThreadIndex __iam = omp_get_thread_num();

        _DifferenceType __start = __splitters[__iam],
                         __stop = __splitters[__iam + 1];

        _DifferenceType __pos_in_pattern = 0;
        bool __found_pattern = false;

        while (__start <= __stop && !__found_pattern)
          {
            // Get new value of result.
#           pragma omp flush(__result)
            // No chance for this thread to find first occurrence.
            if (__result < __start)
              break;
            while (__pred(__begin1[__start + __pos_in_pattern],
                          __begin2[__pos_in_pattern]))
              {
                ++__pos_in_pattern;
                if (__pos_in_pattern == __pattern_length)
                  {
                    // Found new candidate for result.
                    omp_set_lock(&__result_lock);
                    __result = std::min(__result, __start);
                    omp_unset_lock(&__result_lock);

                    __found_pattern = true;
                    break;
                  }
              }
            // Make safe jump.
            __start += (__pos_in_pattern - __advances[__pos_in_pattern]);
            __pos_in_pattern =
              (__advances[__pos_in_pattern] < 0
               ? 0 : __advances[__pos_in_pattern]);
          }
      } //parallel

      omp_destroy_lock(&__result_lock);

      delete[] __splitters;

      // Return iterator on found element.
      return (__begin1 + __result);
    }
} // end namespace

#endif /* _GLIBCXX_PARALLEL_SEARCH_H */
convolution_3x3_pack8to1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_pack8to1_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; int remain_outch_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out0.fill(bias0); const float* k0 = kernel.channel(p); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); __m256 _k00 = _mm256_loadu_ps(k0); __m256 _k01 = _mm256_loadu_ps(k0 + 8); __m256 _k02 = _mm256_loadu_ps(k0 + 16); __m256 _k10 = _mm256_loadu_ps(k0 + 24); __m256 _k11 = _mm256_loadu_ps(k0 + 32); __m256 _k12 = _mm256_loadu_ps(k0 + 40); __m256 _k20 = _mm256_loadu_ps(k0 + 48); __m256 _k21 = _mm256_loadu_ps(k0 + 56); __m256 _k22 = _mm256_loadu_ps(k0 + 64); int i = 0; for (; i < outh; i++) { const float* r0 = img0.row(i); const float* r1 = img0.row(i + 1); const float* r2 = img0.row(i + 2); int j = 0; for (; j < outw; j++) { __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _sum0 = _mm256_mul_ps(_k00, _r00); __m256 _sum1 = _mm256_mul_ps(_k01, _r01); __m256 _sum2 = _mm256_mul_ps(_k02, _r02); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0); _sum1 = _mm256_fmadd_ps(_k11, _r11, _sum1); _sum2 = _mm256_fmadd_ps(_k12, _r12, _sum2); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0); _sum1 = _mm256_fmadd_ps(_k21, _r21, _sum1); _sum2 = _mm256_fmadd_ps(_k22, _r22, _sum2); __m128 _sum = HorizontalSums(_sum0, _sum1, _sum2); *outptr0 += _mm_reduce_add_ps(_sum); // dot outptr0++; r0 += 8; r1 += 8; r2 += 8; } } k0 += 9 * 8; } } }
GB_unaryop__lnot_int64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int64_uint8
// op(A') function:  GB_tran__lnot_int64_uint8

// C type:   int64_t
// A type:   uint8_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = !(aij != 0)

// type of matrix A's entries
#define GB_ATYPE \
    uint8_t

// type of matrix C's entries
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type to the C type
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_int64_uint8
(
    int64_t *Cx,        // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: each entry is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the generic transpose template is specialized by the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
updater_basemaker-inl.h
/*!
 * Copyright 2014 by Contributors
 * \file updater_basemaker-inl.h
 * \brief implement a common tree constructor
 * \author Tianqi Chen
 */
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_

#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/tree_updater.h>

#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>

#include "./param.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"

namespace xgboost {
namespace tree {
/*!
 * \brief base tree maker class that defines common operation
 *  needed in tree making
 */
class BaseMaker: public TreeUpdater {
 public:
  // Parse updater arguments into the training parameter struct.
  void Init(const std::vector<std::pair<std::string, std::string> >& args) override {
    param_.InitAllowUnknown(args);
  }

 protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find type of each feature, use column format */
    // Records, per feature fid, fminmax_[2*fid] = max(-fvalue) (i.e. the
    // negated minimum) and fminmax_[2*fid+1] = max(fvalue), scanning each
    // sorted column's first and last entries.
    inline void InitByCol(DMatrix* p_fmat,
                          const RegTree& tree) {
      fminmax_.resize(tree.param.num_feature * 2);
      std::fill(fminmax_.begin(), fminmax_.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
        for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
          auto c = batch[fid];
          if (c.size() != 0) {
            CHECK_LT(fid * 2, fminmax_.size());
            // columns are sorted by value, so c[0] is the min and
            // c[c.size()-1] is the max of this feature in the batch
            fminmax_[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
            fminmax_[fid * 2 + 1] = std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
          }
        }
      }
    }
    /*! \brief synchronize the information */
    // Allreduce(max) makes fminmax_ globally consistent across workers.
    inline void SyncInfo() {
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax_.size())
          << "FeatHelper fid exceed query bound ";
      bst_float a = fminmax_[fid * 2];
      bst_float b = fminmax_[fid * 2 + 1];
      // still at the -max sentinel: the feature never appeared
      if (a == -std::numeric_limits<bst_float>::max()) return 0;
      // min == max (a holds the negated min): a single distinct value
      if (-a == b) {
        return 1;
      } else {
        return 2;
      }
    }
    inline bst_float MaxValue(bst_uint fid) const {
      return fminmax_[fid *2 + 1];
    }
    // Sample a fraction p of the non-empty features into *p_findex,
    // then broadcast rank 0's choice so all workers agree.
    inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
      std::vector<bst_uint> &findex = *p_findex;
      findex.clear();
      for (size_t i = 0; i < fminmax_.size(); i += 2) {
        const auto fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      auto n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsample
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      fs.Read(&findex);
    }

   private:
    // interleaved per-feature stats: [2*fid] = -min fvalue, [2*fid+1] = max fvalue
    std::vector<bst_float> fminmax_;
  };
  // ------static helper functions ------
  // helper function to get to next level of the tree
  /*! \brief this is helper function for row based data*/
  // Routes one (sparse) row instance from node nid to the child it falls
  // into; features missing from the row take the default direction.
  inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
    const RegTree::Node &n = tree[nid];
    bst_uint findex = n.SplitIndex();
    for (const auto& ins : inst) {
      if (findex == ins.index) {
        if (ins.fvalue < n.SplitCond()) {
          return n.LeftChild();
        } else {
          return n.RightChild();
        }
      }
    }
    return n.DefaultChild();
  }
  // ------class member helpers---------
  /*!
 \brief initialize temp data structure */
  // Sets up position_ (row -> node assignment, with ~nid marking rows that
  // are excluded from statistics) and seeds the expand queue with the roots.
  inline void InitData(const std::vector<GradientPair> &gpair,
                       const DMatrix &fmat,
                       const RegTree &tree) {
    CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
        << "TreeMaker: can only grow new tree";
    const std::vector<unsigned> &root_index = fmat.Info().root_index_;
    {
      // setup position
      position_.resize(gpair.size());
      if (root_index.size() == 0) {
        std::fill(position_.begin(), position_.end(), 0);
      } else {
        for (size_t i = 0; i < position_.size(); ++i) {
          position_[i] = root_index[i];
          CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
              << "root index exceed setting";
        }
      }
      // mark delete for the deleted datas
      // (negative hessian is used as the "deleted row" sentinel)
      for (size_t i = 0; i < position_.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
      }
      // mark subsample: rows losing the coin flip are bit-flipped out too
      if (param_.subsample < 1.0f) {
        std::bernoulli_distribution coin_flip(param_.subsample);
        auto& rnd = common::GlobalRandom();
        for (size_t i = 0; i < position_.size(); ++i) {
          if (gpair[i].GetHess() < 0.0f) continue;
          if (!coin_flip(rnd)) position_[i] = ~position_[i];
        }
      }
    }
    {
      // expand query
      qexpand_.reserve(256); qexpand_.clear();
      for (int i = 0; i < tree.param.num_roots; ++i) {
        qexpand_.push_back(i);
      }
      this->UpdateNode2WorkIndex(tree);
    }
  }
  /*! \brief update queue expand add in new leaves */
  // Replaces qexpand_ with the children of every node that was split.
  inline void UpdateQueueExpand(const RegTree &tree) {
    std::vector<int> newnodes;
    for (int nid : qexpand_) {
      if (!tree[nid].IsLeaf()) {
        newnodes.push_back(tree[nid].LeftChild());
        newnodes.push_back(tree[nid].RightChild());
      }
    }
    // use new nodes for qexpand
    qexpand_ = newnodes;
    this->UpdateNode2WorkIndex(tree);
  }
  // return decoded position
  // (positions are stored bit-flipped, ~nid, for excluded rows; ~ recovers nid)
  inline int DecodePosition(bst_uint ridx) const {
    const int pid = position_[ridx];
    return pid < 0 ? ~pid : pid;
  }
  // encode the encoded position value for ridx
  // (preserves the excluded/included flag while moving the row to node nid)
  inline void SetEncodePosition(bst_uint ridx, int nid) {
    if (position_[ridx] < 0) {
      position_[ridx] = ~nid;
    } else {
      position_[ridx] = nid;
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *        reset the positions to the latest one
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) {
    // set the positions in the nondefault
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    this->SetDefaultPostion(p_fmat, tree);
  }
  /*!
   * \brief helper function to set the non-leaf positions to default direction.
   *  This function can be applied multiple times and will get the same result.
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) {
    // set default direct nodes to default
    // for leaf nodes that are not fresh, mark then to ~nid,
    // so that they are ignored in future statistics collection
    const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);

    #pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = this->DecodePosition(ridx);
      if (tree[nid].IsLeaf()) {
        // mark finish when it is not a fresh leaf
        // (a fresh leaf still has RightChild() == -1 recorded)
        if (tree[nid].RightChild() == -1) {
          position_[ridx] = ~nid;
        }
      } else {
        // push to default branch
        if (tree[nid].DefaultLeft()) {
          this->SetEncodePosition(ridx, tree[nid].LeftChild());
        } else {
          this->SetEncodePosition(ridx, tree[nid].RightChild());
        }
      }
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *        to CORRECT the positions of non-default directions that WAS set to default
   *        before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The set of index that contains split solutions.
* \param tree the regression tree structure */ inline void CorrectNonDefaultPositionByBatch( const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set, const RegTree &tree) { for (size_t fid = 0; fid < batch.Size(); ++fid) { auto col = batch[fid]; auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid); if (it != sorted_split_set.end() && *it == fid) { const auto ndata = static_cast<bst_omp_uint>(col.size()); #pragma omp parallel for schedule(static) for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_uint ridx = col[j].index; const bst_float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); CHECK(tree[nid].IsLeaf()); int pid = tree[nid].Parent(); // go back to parent, correct those who are not default if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) { if (fvalue < tree[pid].SplitCond()) { this->SetEncodePosition(ridx, tree[pid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[pid].RightChild()); } } } } } } /*! * \brief this is helper function uses column based data structure, * \param nodes the set of nodes that contains the split to be used * \param tree the regression tree structure * \param out_split_set The split index set */ inline void GetSplitSet(const std::vector<int> &nodes, const RegTree &tree, std::vector<unsigned>* out_split_set) { std::vector<unsigned>& fsplits = *out_split_set; fsplits.clear(); // step 1, classify the non-default data into right places for (int nid : nodes) { if (!tree[nid].IsLeaf()) { fsplits.push_back(tree[nid].SplitIndex()); } } std::sort(fsplits.begin(), fsplits.end()); fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin()); } /*! 
* \brief this is helper function uses column based data structure,
 *  update all positions into nondefault branch, if any, ignore the default branch
 * \param nodes the set of nodes that contains the split to be used
 * \param p_fmat feature matrix needed for tree construction
 * \param tree the regression tree structure
 */
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                      DMatrix *p_fmat, const RegTree &tree) {
  // gather the sorted unique set of split feature indices used by `nodes`
  std::vector<unsigned> fsplits;
  this->GetSplitSet(nodes, tree, &fsplits);

  // walk the sorted column batches; rows with a present value for a split
  // feature are routed explicitly, default-direction rows are left alone
  for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
    for (auto fid : fsplits) {
      auto col = batch[fid];
      const auto ndata = static_cast<bst_omp_uint>(col.size());
      #pragma omp parallel for schedule(static)
      for (bst_omp_uint j = 0; j < ndata; ++j) {
        const bst_uint ridx = col[j].index;
        const bst_float fvalue = col[j].fvalue;
        const int nid = this->DecodePosition(ridx);
        // go back to parent, correct those who are not default
        if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
          if (fvalue < tree[nid].SplitCond()) {
            this->SetEncodePosition(ridx, tree[nid].LeftChild());
          } else {
            this->SetEncodePosition(ridx, tree[nid].RightChild());
          }
        }
      }
    }
  }
}
/*!
\brief helper function to get statistics from a tree */
template<typename TStats>
inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                         const DMatrix &fmat,
                         const RegTree &tree,
                         std::vector< std::vector<TStats> > *p_thread_temp,
                         std::vector<TStats> *p_node_stats) {
  std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
  // NOTE(review): `info` is unused below; num_row_ is re-read via fmat.Info()
  const MetaInfo &info = fmat.Info();
  // one scratch statistics vector per OpenMP thread to avoid contention
  thread_temp.resize(omp_get_max_threads());
  p_node_stats->resize(tree.param.num_nodes);
  // zero-initialize each thread's accumulator for the expanding nodes
  #pragma omp parallel
  {
    const int tid = omp_get_thread_num();
    thread_temp[tid].resize(tree.param.num_nodes, TStats());
    for (unsigned int nid : qexpand_) {
      thread_temp[tid][nid] = TStats();
    }
  }
  // setup position: accumulate each row's gradient into its node's
  // per-thread statistics (negative positions mean "no longer expanding")
  const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
  #pragma omp parallel for schedule(static)
  for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
    const int nid = position_[ridx];
    const int tid = omp_get_thread_num();
    if (nid >= 0) {
      thread_temp[tid][nid].Add(gpair[ridx]);
    }
  }
  // sum the per thread statistics together
  for (int nid : qexpand_) {
    TStats &s = (*p_node_stats)[nid];
    s = TStats();
    for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
      s.Add(thread_temp[tid][nid]);
    }
  }
}
/*! \brief common helper data structure to build sketch */
struct SketchEntry {
  /*! \brief total sum of amount to be met */
  double sum_total;
  /*! \brief statistics used in the sketch */
  double rmin, wmin;
  /*! \brief last seen feature value */
  bst_float last_fvalue;
  /*! \brief current size of sketch */
  double next_goal;
  // pointer to the sketch to put things in
  common::WXQuantileSketch<bst_float, bst_float> *sketch;
  // initialize the space (one extra temp slot is reserved for Finalize)
  inline void Init(unsigned max_size) {
    next_goal = -1.0f;
    rmin = wmin = 0.0f;
    sketch->temp.Reserve(max_size + 1);
    sketch->temp.size = 0;
  }
  /*!
* \brief push a new element to sketch * \param fvalue feature value, comes in sorted ascending order * \param w weight * \param max_size */ inline void Push(bst_float fvalue, bst_float w, unsigned max_size) { if (next_goal == -1.0f) { next_goal = 0.0f; last_fvalue = fvalue; wmin = w; return; } if (last_fvalue != fvalue) { double rmax = rmin + wmin; if (rmax >= next_goal && sketch->temp.size != max_size) { if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) { // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>:: Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); CHECK_LT(sketch->temp.size, max_size) << "invalid maximum size max_size=" << max_size << ", stemp.size" << sketch->temp.size; ++sketch->temp.size; } if (sketch->temp.size == max_size) { next_goal = sum_total * 2.0f + 1e-5f; } else { next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size); } } else { if (rmax >= next_goal) { LOG(TRACKER) << "INFO: rmax=" << rmax << ", sum_total=" << sum_total << ", naxt_goal=" << next_goal << ", size=" << sketch->temp.size; } } rmin = rmax; wmin = w; last_fvalue = fvalue; } else { wmin += w; } } /*! \brief push final unfinished value to the sketch */ inline void Finalize(unsigned max_size) { double rmax = rmin + wmin; if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) { CHECK_LE(sketch->temp.size, max_size) << "Finalize: invalid maximum size, max_size=" << max_size << ", stemp.size=" << sketch->temp.size; // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>:: Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); ++sketch->temp.size; } sketch->PushTemp(); } }; /*! \brief training parameter of tree grower */ TrainParam param_; /*! 
\brief queue of nodes to be expanded */ std::vector<int> qexpand_; /*! * \brief map active node to is working index offset in qexpand, * can be -1, which means the node is node actively expanding */ std::vector<int> node2workindex_; /*! * \brief position of each instance in the tree * can be negative, which means this position is no longer expanding * see also Decode/EncodePosition */ std::vector<int> position_; private: inline void UpdateNode2WorkIndex(const RegTree &tree) { // update the node2workindex std::fill(node2workindex_.begin(), node2workindex_.end(), -1); node2workindex_.resize(tree.param.num_nodes); for (size_t i = 0; i < qexpand_.size(); ++i) { node2workindex_[qexpand_[i]] = static_cast<int>(i); } } }; } // namespace tree } // namespace xgboost #endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
omp_pause_resource.c
// RUN: %libomp-compile-and-run // Linking fails for icc 18/19 // UNSUPPORTED: icc-18, icc-19 #include <stdio.h> #include "omp_testsuite.h" int test_omp_pause_resource() { int fails, nthreads, my_dev; fails = 0; nthreads = 0; my_dev = omp_get_initial_device(); #pragma omp parallel #pragma omp single nthreads = omp_get_num_threads(); if (omp_pause_resource(omp_pause_soft, my_dev)) fails++; #pragma omp parallel shared(nthreads) #pragma omp single nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; if (omp_pause_resource(omp_pause_hard, my_dev)) fails++; nthreads = 0; #pragma omp parallel shared(nthreads) #pragma omp single nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; if (omp_pause_resource_all(omp_pause_soft)) fails++; nthreads = 0; #pragma omp parallel shared(nthreads) #pragma omp single nthreads = omp_get_num_threads(); if (nthreads == 0) fails++; return fails == 0; } int main() { int i; int num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_pause_resource()) { num_failed++; } } return num_failed; }
ConverterOSG.h
/* -*-c++-*- IfcQuery www.ifcquery.com * MIT License Copyright (c) 2017 Fabian Gerold Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/
#pragma once

#include <osg/CullFace>
#include <osg/Geode>
#include <osg/Hint>
#include <osg/LineWidth>
#include <osg/Material>
#include <osg/Point>
#include <osgUtil/Tessellator>
#include <ifcpp/model/BasicTypes.h>
#include <ifcpp/model/StatusCallback.h>
#include <ifcpp/IFC4/include/IfcCurtainWall.h>
#include <ifcpp/IFC4/include/IfcFeatureElementSubtraction.h>
#include <ifcpp/IFC4/include/IfcGloballyUniqueId.h>
#include <ifcpp/IFC4/include/IfcProject.h>
#include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h>
#include <ifcpp/IFC4/include/IfcRelAggregates.h>
#include <ifcpp/IFC4/include/IfcSpace.h>
#include <ifcpp/IFC4/include/IfcWindow.h>
#include <ifcpp/geometry/GeometrySettings.h>
#include <ifcpp/geometry/SceneGraphUtils.h>
#include <ifcpp/geometry/AppearanceData.h>
#include "GeometryInputData.h"
#include "IncludeCarveHeaders.h"
#include "CSG_Adapter.h"

//\brief Converts IFC product shape data into an OpenSceneGraph scene graph.
class ConverterOSG : public StatusCallback
{
protected:
	// geometry settings shared with the geometry converter (crease angles etc.)
	shared_ptr<GeometrySettings>				m_geom_settings;
	// IfcProduct GUID -> scene graph switch for that product
	std::map<std::string, osg::ref_ptr<osg::Switch> >	m_map_entity_guid_to_switch;
	// IfcRepresentation entity id -> scene graph switch for that representation
	std::map<int, osg::ref_ptr<osg::Switch> >		m_map_representation_id_to_switch;
	double							m_recent_progress;
	// shared back-face culling attribute, created once in the constructor
	osg::ref_ptr<osg::CullFace>				m_cull_back_off;
	// shared translucent state set (blending on, transparent render bin)
	osg::ref_ptr<osg::StateSet>				m_glass_stateset;
	//\brief StateSet caching and re-use
	std::vector<osg::ref_ptr<osg::StateSet> >		m_vec_existing_statesets;
	bool							m_enable_stateset_caching = false;
#ifdef ENABLE_OPENMP
	// guards the state set cache when converting products in parallel
	Mutex							m_writelock_appearance_cache;
#endif

public:
	ConverterOSG( shared_ptr<GeometrySettings>& geom_settings ) : m_geom_settings( geom_settings )
	{
		m_cull_back_off = new osg::CullFace( osg::CullFace::BACK );
		m_glass_stateset = new osg::StateSet();
		m_glass_stateset->setMode( GL_BLEND, osg::StateAttribute::ON );
		m_glass_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN );
	}
	virtual ~ConverterOSG() {}

	// Map: IfcProduct ID -> scenegraph switch
	std::map<std::string, osg::ref_ptr<osg::Switch> >& getMapEntityGUIDToSwitch() { return m_map_entity_guid_to_switch; }
	// Map:
Representation Identifier -> scenegraph switch std::map<int, osg::ref_ptr<osg::Switch> >& getMapRepresentationToSwitch() { return m_map_representation_id_to_switch; } void clearInputCache() { m_map_entity_guid_to_switch.clear(); m_map_representation_id_to_switch.clear(); m_vec_existing_statesets.clear(); } static void drawBoundingBox( const carve::geom::aabb<3>& aabb, osg::Geometry* geom ) { osg::ref_ptr<osg::Vec3Array> vertices = dynamic_cast<osg::Vec3Array*>( geom->getVertexArray() ); if( !vertices ) { vertices = new osg::Vec3Array(); geom->setVertexArray( vertices ); } const carve::geom::vector<3>& aabb_pos = aabb.pos; const carve::geom::vector<3>& extent = aabb.extent; const double dex = extent.x; const double dey = extent.y; const double dez = extent.z; const int vert_id_offset = vertices->size(); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y - dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y - dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y + dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y + dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y - dey, aabb_pos.z + dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y - dey, aabb_pos.z + dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y + dey, aabb_pos.z + dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y + dey, aabb_pos.z + dez ) ); osg::ref_ptr<osg::DrawElementsUInt> box_lines = new osg::DrawElementsUInt( GL_LINE_STRIP, 0 ); box_lines->push_back( vert_id_offset + 0 ); box_lines->push_back( vert_id_offset + 1 ); box_lines->push_back( vert_id_offset + 2 ); box_lines->push_back( vert_id_offset + 3 ); box_lines->push_back( vert_id_offset + 0 ); box_lines->push_back( vert_id_offset + 4 ); box_lines->push_back( vert_id_offset + 5 ); box_lines->push_back( vert_id_offset + 1 ); 
box_lines->push_back( vert_id_offset + 5 ); box_lines->push_back( vert_id_offset + 6 ); box_lines->push_back( vert_id_offset + 2 ); box_lines->push_back( vert_id_offset + 6 ); box_lines->push_back( vert_id_offset + 7 ); box_lines->push_back( vert_id_offset + 3 ); box_lines->push_back( vert_id_offset + 7 ); box_lines->push_back( vert_id_offset + 4 ); geom->addPrimitiveSet( box_lines ); osg::ref_ptr<osg::Material> mat = new osg::Material(); if( !mat ) { throw OutOfMemoryException(); } osg::Vec4f ambientColor( 1.f, 0.2f, 0.1f, 1.f ); mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setDiffuse( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setSpecular( osg::Material::FRONT_AND_BACK, ambientColor ); //mat->setShininess( osg::Material::FRONT_AND_BACK, shininess ); //mat->setColorMode( osg::Material::SPECULAR ); osg::StateSet* stateset = geom->getOrCreateStateSet(); if( !stateset ) { throw OutOfMemoryException(); } stateset->setAttribute( mat, osg::StateAttribute::ON ); stateset->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); } static void drawFace( const carve::mesh::Face<3>* face, osg::Geode* geode, bool add_color_array = false ) { #ifdef _DEBUG std::cout << "not triangulated" << std::endl; #endif std::vector<vec3> face_vertices; face_vertices.resize( face->nVertices() ); carve::mesh::Edge<3> *e = face->edge; const size_t num_vertices = face->nVertices(); for( size_t i = 0; i < num_vertices; ++i ) { face_vertices[i] = e->v1()->v; e = e->next; } if( num_vertices < 4 ) { std::cout << "drawFace is meant only for num vertices > 4" << std::endl; } vec3* vertex_vec; osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array( num_vertices ); if( !vertices ) { throw OutOfMemoryException(); } osg::ref_ptr<osg::DrawElementsUInt> triangles = new osg::DrawElementsUInt( osg::PrimitiveSet::POLYGON, num_vertices ); if( !triangles ) { throw OutOfMemoryException(); } for( size_t i = 0; i < num_vertices; ++i ) { vertex_vec = &face_vertices[num_vertices - i - 
1]; ( *vertices )[i].set( vertex_vec->x, vertex_vec->y, vertex_vec->z ); ( *triangles )[i] = i; } osg::Vec3f poly_normal = SceneGraphUtils::computePolygonNormal( vertices ); osg::ref_ptr<osg::Vec3Array> normals = new osg::Vec3Array(); normals->resize( num_vertices, poly_normal ); osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->setNormalArray( normals ); normals->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POLYGON, 0, vertices->size() ) ); if( add_color_array ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->resize( vertices->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } if( num_vertices > 4 ) { // TODO: check if polygon is convex with Gift wrapping algorithm osg::ref_ptr<osgUtil::Tessellator> tesselator = new osgUtil::Tessellator(); tesselator->setTessellationType( osgUtil::Tessellator::TESS_TYPE_POLYGONS ); //tesselator->setWindingType( osgUtil::Tessellator::TESS_WINDING_ODD ); tesselator->retessellatePolygons( *geometry ); } geode->addDrawable( geometry ); #ifdef DEBUG_DRAW_NORMALS osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array(); for( size_t i = 0; i < num_vertices; ++i ) { vertex_vec = &face_vertices[num_vertices - i - 1]; vertices_normals->push_back( osg::Vec3f( vertex_vec->x, vertex_vec->y, vertex_vec->z ) ); vertices_normals->push_back( osg::Vec3f( vertex_vec->x, vertex_vec->y, vertex_vec->z ) + poly_normal ); } osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array(); colors_normals->resize( num_vertices * 2, osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) ); osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry(); geometry_normals->setVertexArray( vertices_normals ); geometry_normals->setColorArray( colors_normals ); geometry_normals->setColorBinding( osg::Geometry::BIND_PER_VERTEX ); 
geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry_normals->setNormalBinding( osg::Geometry::BIND_OFF ); geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) ); geode->addDrawable( geometry_normals ); #endif } //#define DEBUG_DRAW_NORMALS static void drawMeshSet( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, osg::Geode* geode, double crease_angle = M_PI*0.05, bool add_color_array = false ) { if( !meshset ) { return; } osg::ref_ptr<osg::Vec3Array> vertices_tri = new osg::Vec3Array(); if( !vertices_tri ) { throw OutOfMemoryException(); } osg::ref_ptr<osg::Vec3Array> normals_tri = new osg::Vec3Array(); if( !normals_tri ) { throw OutOfMemoryException(); } osg::ref_ptr<osg::Vec3Array> vertices_quad; osg::ref_ptr<osg::Vec3Array> normals_quad; const size_t max_num_faces_per_vertex = 10000; std::map<carve::mesh::Face<3>*, double> map_face_area; std::map<carve::mesh::Face<3>*, double>::iterator it_face_area; if( crease_angle > 0 ) { for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh ) { const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh]; const size_t num_faces = mesh->faces.size(); for( size_t i_face = 0; i_face != num_faces; ++i_face ) { carve::mesh::Face<3>* face = mesh->faces[i_face]; // compute area of projected face: std::vector<vec2> projected; face->getProjectedVertices( projected ); double face_area = carve::geom2d::signedArea( projected ); map_face_area[face] = abs( face_area ); } } } for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh ) { const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh]; const size_t num_faces = mesh->faces.size(); for( size_t i_face = 0; i_face != num_faces; ++i_face ) { carve::mesh::Face<3>* face = mesh->faces[i_face]; const size_t n_vertices = face->nVertices(); if( n_vertices > 4 ) { drawFace( face, geode ); continue; } const vec3 face_normal = face->plane.N; if( crease_angle > 0 ) { 
carve::mesh::Edge<3>* e = face->edge; for( size_t jj = 0; jj < n_vertices; ++jj ) { carve::mesh::Vertex<3>* vertex = e->vert; vec3 intermediate_normal; // collect all faces at vertex // | ^ // | | // f1 e->rev | | e face // v | // <---e1------- <--------------- //-------------> ---------------> // | ^ // | | // v | carve::mesh::Edge<3>* e1 = e;// ->rev->next; carve::mesh::Face<3>* f1 = e1->face; #ifdef _DEBUG if( f1 != face ) { std::cout << "f1 != face" << std::endl; } #endif for( size_t i3 = 0; i3 < max_num_faces_per_vertex; ++i3 ) { if( !e1->rev ) { break; } if( !e1->rev->next ) { break; } vec3 f1_normal = f1->plane.N; const double cos_angle = dot( f1_normal, face_normal ); if( cos_angle > 0 ) { const double deviation = std::abs( cos_angle - 1.0 ); if( deviation < crease_angle ) { double weight = 0.0; it_face_area = map_face_area.find( f1 ); if( it_face_area != map_face_area.end() ) { weight = it_face_area->second; } intermediate_normal += weight*f1_normal; } } if( !e1->rev ) { // it's an open mesh break; } e1 = e1->rev->next; if( !e1 ) { break; } f1 = e1->face; #ifdef _DEBUG if( e1->vert != vertex ) { std::cout << "e1->vert != vertex" << std::endl; } #endif if( f1 == face ) { break; } } const double intermediate_normal_length = intermediate_normal.length(); if( intermediate_normal_length < 0.0000000001 ) { intermediate_normal = face_normal; } else { // normalize: intermediate_normal *= 1.0 / intermediate_normal_length; } const vec3& vertex_v = vertex->v; if( face->n_edges == 3 ) { vertices_tri->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); normals_tri->push_back( osg::Vec3( intermediate_normal.x, intermediate_normal.y, intermediate_normal.z ) ); } else if( face->n_edges == 4 ) { if( !vertices_quad ) vertices_quad = new osg::Vec3Array(); vertices_quad->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); if( !normals_quad ) normals_quad = new osg::Vec3Array(); normals_quad->push_back( osg::Vec3( intermediate_normal.x, 
intermediate_normal.y, intermediate_normal.z ) ); } e = e->next; } } else { carve::mesh::Edge<3>* e = face->edge; for( size_t jj = 0; jj < n_vertices; ++jj ) { carve::mesh::Vertex<3>* vertex = e->vert; const vec3& vertex_v = vertex->v; if( face->n_edges == 3 ) { vertices_tri->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); normals_tri->push_back( osg::Vec3( face_normal.x, face_normal.y, face_normal.z ) ); } else if( face->n_edges == 4 ) { if( !vertices_quad ) vertices_quad = new osg::Vec3Array(); vertices_quad->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); if( !normals_quad ) normals_quad = new osg::Vec3Array(); normals_quad->push_back( osg::Vec3( face_normal.x, face_normal.y, face_normal.z ) ); } e = e->next; } } } } if( vertices_tri->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); if( !geometry ) { throw OutOfMemoryException(); } geometry->setVertexArray( vertices_tri ); geometry->setNormalArray( normals_tri ); normals_tri->setBinding( osg::Array::BIND_PER_VERTEX ); if( add_color_array ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); if( !colors ) { throw OutOfMemoryException(); } colors->resize( vertices_tri->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::TRIANGLES, 0, vertices_tri->size() ) ); if( !geometry ) { throw OutOfMemoryException(); } geode->addDrawable( geometry ); #ifdef DEBUG_DRAW_NORMALS osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array(); for( size_t i = 0; i < vertices_tri->size(); ++i ) { osg::Vec3f& vertex_vec = vertices_tri->at( i );// [i]; osg::Vec3f& normal_vec = normals_tri->at( i ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) + normal_vec ); } 
osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array(); colors_normals->resize( vertices_normals->size(), osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) ); osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry(); geometry_normals->setVertexArray( vertices_normals ); geometry_normals->setColorArray( colors_normals ); geometry_normals->setColorBinding( osg::Geometry::BIND_PER_VERTEX ); geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry_normals->setNormalBinding( osg::Geometry::BIND_OFF ); geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) ); geode->addDrawable( geometry_normals ); #endif } if( vertices_quad ) { if( vertices_quad->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); if( !geometry ) { throw OutOfMemoryException(); } geometry->setVertexArray( vertices_quad ); if( normals_quad ) { normals_quad->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setNormalArray( normals_quad ); } if( add_color_array ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); if( !colors ) { throw OutOfMemoryException(); } colors->resize( vertices_quad->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::QUADS, 0, vertices_quad->size() ) ); if( !geometry ) { throw OutOfMemoryException(); } geode->addDrawable( geometry ); } } } static void drawPolyline( const carve::input::PolylineSetData* polyline_data, osg::Geode* geode, bool add_color_array = false ) { osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); if( !vertices ) { throw OutOfMemoryException(); } carve::line::PolylineSet* polyline_set = polyline_data->create( carve::input::opts() ); if( polyline_set->vertices.size() < 2 ) { #ifdef _DEBUG std::cout << __FUNC__ << ": polyline_set->vertices.size() < 2" << 
std::endl; #endif return; } for( auto it = polyline_set->lines.begin(); it != polyline_set->lines.end(); ++it ) { const carve::line::Polyline* pline = *it; size_t vertex_count = pline->vertexCount(); for( size_t vertex_i = 0; vertex_i < vertex_count; ++vertex_i ) { if( vertex_i >= polyline_set->vertices.size() ) { #ifdef _DEBUG std::cout << __FUNC__ << ": vertex_i >= polyline_set->vertices.size()" << std::endl; #endif continue; } const carve::line::Vertex* v = pline->vertex( vertex_i ); vertices->push_back( osg::Vec3d( v->v[0], v->v[1], v->v[2] ) ); } } osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); if( !geometry ) { throw OutOfMemoryException(); } geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINE_STRIP, 0, vertices->size() ) ); if( add_color_array ) { osg::Vec4f color( 0.6f, 0.6f, 0.6f, 0.1f ); osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array( vertices->size(), &color ); if( !colors ) { throw OutOfMemoryException(); } colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } geode->addDrawable( geometry ); } static void computeCreaseEdgesFromMeshset( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, std::vector<carve::mesh::Edge<3>* >& vec_edges_out, const double crease_angle ) { if( !meshset ) { return; } for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh ) { const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh]; const std::vector<carve::mesh::Edge<3>* >& vec_closed_edges = mesh->closed_edges; for( size_t i_edge = 0; i_edge < vec_closed_edges.size(); ++i_edge ) { carve::mesh::Edge<3>* edge = vec_closed_edges[i_edge]; if( !edge ) { continue; } carve::mesh::Edge<3>* edge_reverse = edge->rev; if( !edge_reverse ) { continue; } carve::mesh::Face<3>* face = edge->face; carve::mesh::Face<3>* face_reverse = edge_reverse->face; const carve::geom::vector<3>& f1_normal = face->plane.N; const carve::geom::vector<3>& f2_normal = 
face_reverse->plane.N; const double cos_angle = dot( f1_normal, f2_normal ); if( cos_angle > 0 ) { const double deviation = std::abs( cos_angle - 1.0 ); if( deviation < crease_angle ) { continue; } } // TODO: if area of face and face_reverse is equal, skip the crease edge. It could be the inside or outside of a cylinder. Check also if > 2 faces in a row have same normal angle differences vec_edges_out.push_back( edge ); } } } static void renderMeshsetCreaseEdges( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, osg::Geode* target_geode, const double crease_angle ) { if( !meshset ) { return; } if( !target_geode ) { return; } std::vector<carve::mesh::Edge<3>* > vec_crease_edges; computeCreaseEdgesFromMeshset( meshset, vec_crease_edges, crease_angle ); if( vec_crease_edges.size() > 0 ) { osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); for( size_t i_edge = 0; i_edge < vec_crease_edges.size(); ++i_edge ) { const carve::mesh::Edge<3>* edge = vec_crease_edges[i_edge]; const carve::geom::vector<3>& vertex1 = edge->v1()->v; const carve::geom::vector<3>& vertex2 = edge->v2()->v; vertices->push_back( osg::Vec3d( vertex1.x, vertex1.y, vertex1.z ) ); vertices->push_back( osg::Vec3d( vertex2.x, vertex2.y, vertex2.z ) ); } osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setName("creaseEdges"); geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices->size() ) ); geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry->getOrCreateStateSet()->setMode( GL_BLEND, osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setAttributeAndModes( new osg::LineWidth( 3.0f ), osg::StateAttribute::ON ); osg::Material* mat = new osg::Material(); mat->setDiffuse(osg::Material::FRONT_AND_BACK, osg::Vec4f(0.3f, 0.3f, 0.35f, 0.8f)); geometry->getOrCreateStateSet()->setAttributeAndModes(mat, osg::StateAttribute::ON); 
geometry->getOrCreateStateSet()->setMode( GL_LINE_SMOOTH, osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setAttributeAndModes( new osg::Hint( GL_LINE_SMOOTH_HINT, GL_NICEST ), osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setRenderBinDetails( 10, "RenderBin"); target_geode->addDrawable( geometry ); } } void applyAppearancesToGroup( const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances, osg::Group* grp ) { for( size_t ii = 0; ii < vec_product_appearances.size(); ++ii ) { const shared_ptr<AppearanceData>& appearance = vec_product_appearances[ii]; if( !appearance ) { continue; } if( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_SURFACE || appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_ANY ) { osg::ref_ptr<osg::StateSet> item_stateset; convertToOSGStateSet( appearance, item_stateset ); if( item_stateset ) { osg::StateSet* existing_item_stateset = grp->getStateSet(); if( existing_item_stateset ) { if( existing_item_stateset != item_stateset ) { existing_item_stateset->merge( *item_stateset ); } } else { grp->setStateSet( item_stateset ); } } } else if( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_CURVE ) { } } } osg::Matrixd convertMatrixToOSG( const carve::math::Matrix& mat_in ) { return osg::Matrixd( mat_in.m[0][0], mat_in.m[0][1], mat_in.m[0][2], mat_in.m[0][3], mat_in.m[1][0], mat_in.m[1][1], mat_in.m[1][2], mat_in.m[1][3], mat_in.m[2][0], mat_in.m[2][1], mat_in.m[2][2], mat_in.m[2][3], mat_in.m[3][0], mat_in.m[3][1], mat_in.m[3][2], mat_in.m[3][3] ); } //\brief method convertProductShapeToOSG: creates geometry objects from an IfcProduct object // caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock void convertProductShapeToOSG( shared_ptr<ProductShapeData>& product_shape, std::map<int, osg::ref_ptr<osg::Switch> >& map_representation_switches ) { if( 
product_shape->m_ifc_object_definition.expired() ) { return; } shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition); shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def); if( !ifc_product ) { return; } std::string product_guid; if (ifc_product->m_GlobalId) { std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX; product_guid = converterX.to_bytes(ifc_product->m_GlobalId->m_value); } std::stringstream strs_product_switch_name; strs_product_switch_name << product_guid << ":" << ifc_product->className() << " group"; bool draw_bounding_box = false; // create OSG objects std::vector<shared_ptr<RepresentationData> >& vec_product_representations = product_shape->m_vec_representations; for( size_t ii_representation = 0; ii_representation < vec_product_representations.size(); ++ii_representation ) { const shared_ptr<RepresentationData>& product_representation_data = vec_product_representations[ii_representation]; if( product_representation_data->m_ifc_representation.expired() ) { continue; } shared_ptr<IfcRepresentation> ifc_representation( product_representation_data->m_ifc_representation ); const int representation_id = ifc_representation->m_entity_id; osg::ref_ptr<osg::Switch> representation_switch = new osg::Switch(); #ifdef _DEBUG std::stringstream strs_representation_name; strs_representation_name << strs_product_switch_name.str().c_str() << ", representation " << ii_representation; representation_switch->setName( strs_representation_name.str().c_str() ); #endif const std::vector<shared_ptr<ItemShapeData> >& product_items = product_representation_data->m_vec_item_data; for( size_t i_item = 0; i_item < product_items.size(); ++i_item ) { const shared_ptr<ItemShapeData>& item_shape = product_items[i_item]; osg::ref_ptr<osg::MatrixTransform> item_group = new osg::MatrixTransform(); if( !item_group ) { throw OutOfMemoryException( __FUNC__ ); } #ifdef _DEBUG std::stringstream strs_item_name; 
strs_item_name << strs_representation_name.str().c_str() << ", item " << i_item; item_group->setName( strs_item_name.str().c_str() ); #endif // create shape for open shells for( size_t ii = 0; ii < item_shape->m_meshsets_open.size(); ++ii ) { shared_ptr<carve::mesh::MeshSet<3> >& item_meshset = item_shape->m_meshsets_open[ii]; CSG_Adapter::retriangulateMeshSet( item_meshset ); osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } drawMeshSet( item_meshset, geode, m_geom_settings->getCoplanarFacesMaxDeltaAngle() ); if( m_geom_settings->getRenderCreaseEdges() ) { renderMeshsetCreaseEdges( item_meshset, geode, m_geom_settings->getCreaseEdgesMaxDeltaAngle() ); } // disable back face culling for open meshes geode->getOrCreateStateSet()->setAttributeAndModes( m_cull_back_off.get(), osg::StateAttribute::OFF ); item_group->addChild( geode ); if( draw_bounding_box ) { carve::geom::aabb<3> bbox = item_meshset->getAABB(); osg::ref_ptr<osg::Geometry> bbox_geom = new osg::Geometry(); drawBoundingBox( bbox, bbox_geom ); geode->addDrawable( bbox_geom ); } #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", open meshset " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } // create shape for meshsets for( size_t ii = 0; ii < item_shape->m_meshsets.size(); ++ii ) { shared_ptr<carve::mesh::MeshSet<3> >& item_meshset = item_shape->m_meshsets[ii]; CSG_Adapter::retriangulateMeshSet( item_meshset ); osg::ref_ptr<osg::Geode> geode_meshset = new osg::Geode(); if( !geode_meshset ) { throw OutOfMemoryException( __FUNC__ ); } drawMeshSet( item_meshset, geode_meshset, m_geom_settings->getCoplanarFacesMaxDeltaAngle() ); item_group->addChild( geode_meshset ); if( m_geom_settings->getRenderCreaseEdges() ) { renderMeshsetCreaseEdges( item_meshset, geode_meshset, m_geom_settings->getCreaseEdgesMaxDeltaAngle() ); } if( draw_bounding_box ) { carve::geom::aabb<3> 
bbox = item_meshset->getAABB(); osg::ref_ptr<osg::Geometry> bbox_geom = new osg::Geometry(); drawBoundingBox( bbox, bbox_geom ); geode_meshset->addDrawable( bbox_geom ); } #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", meshset " << ii; geode_meshset->setName( strs_item_meshset_name.str().c_str() ); #endif } // create shape for points const std::vector<shared_ptr<carve::input::VertexData> >& vertex_points = item_shape->getVertexPoints(); for( size_t ii = 0; ii < vertex_points.size(); ++ii ) { const shared_ptr<carve::input::VertexData>& pointset_data = vertex_points[ii]; if( pointset_data ) { if( pointset_data->points.size() > 0 ) { osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); for( size_t i_pointset_point = 0; i_pointset_point < pointset_data->points.size(); ++i_pointset_point ) { vec3& carve_point = pointset_data->points[i_pointset_point]; vertices->push_back( osg::Vec3d( carve_point.x, carve_point.y, carve_point.z ) ); } osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POINTS, 0, vertices->size() ) ); geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geode->getOrCreateStateSet()->setAttribute( new osg::Point( 3.0f ), osg::StateAttribute::ON ); geode->addDrawable( geometry ); geode->setCullingActive( false ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", vertex_point " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } } } // create shape for polylines for( size_t ii = 0; ii < item_shape->m_polylines.size(); ++ii ) { shared_ptr<carve::input::PolylineSetData>& polyline_data = item_shape->m_polylines[ii]; 
osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); drawPolyline( polyline_data.get(), geode ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", polylines " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } if( m_geom_settings->isShowTextLiterals() ) { for( size_t ii = 0; ii < item_shape->m_vec_text_literals.size(); ++ii ) { shared_ptr<TextItemData>& text_data = item_shape->m_vec_text_literals[ii]; if( !text_data ) { continue; } carve::math::Matrix& text_pos = text_data->m_text_position; // TODO: handle rotation std::string text_str; text_str.assign( text_data->m_text.begin(), text_data->m_text.end() ); osg::Vec3 pos2( text_pos._41, text_pos._42, text_pos._43 ); osg::ref_ptr<osgText::Text> txt = new osgText::Text(); if( !txt ) { throw OutOfMemoryException( __FUNC__ ); } txt->setFont( "fonts/arial.ttf" ); txt->setColor( osg::Vec4f( 0, 0, 0, 1 ) ); txt->setCharacterSize( 0.1f ); txt->setAutoRotateToScreen( true ); txt->setPosition( pos2 ); txt->setText( text_str.c_str() ); txt->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ){ throw OutOfMemoryException( __FUNC__ ); } geode->addDrawable( txt ); item_group->addChild( geode ); } } // apply statesets if there are any if( item_shape->m_vec_item_appearances.size() > 0 ) { applyAppearancesToGroup( item_shape->m_vec_item_appearances, item_group ); } // If anything has been created, add it to the representation group if( item_group->getNumChildren() > 0 ) { #ifdef _DEBUG if( item_group->getNumParents() > 0 ) { std::cout << __FUNC__ << ": item_group->getNumParents() > 0" << std::endl; } #endif representation_switch->addChild( item_group ); } } // apply statesets if there are any if( 
product_representation_data->m_vec_representation_appearances.size() > 0 ) { applyAppearancesToGroup( product_representation_data->m_vec_representation_appearances, representation_switch ); } // If anything has been created, add it to the product group if( representation_switch->getNumChildren() > 0 ) { #ifdef _DEBUG if( representation_switch->getNumParents() > 0 ) { std::cout << __FUNC__ << ": product_representation_switch->getNumParents() > 0" << std::endl; } #endif // enable transparency for certain objects if( dynamic_pointer_cast<IfcSpace>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); } else if( dynamic_pointer_cast<IfcCurtainWall>(ifc_product) || dynamic_pointer_cast<IfcWindow>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); SceneGraphUtils::setMaterialAlpha( representation_switch, 0.6f, true ); } // check if parent building element is window if( ifc_product->m_Decomposes_inverse.size() > 0 ) { for( size_t ii_decomposes = 0; ii_decomposes < ifc_product->m_Decomposes_inverse.size(); ++ii_decomposes ) { const weak_ptr<IfcRelAggregates>& decomposes_weak = ifc_product->m_Decomposes_inverse[ii_decomposes]; if( decomposes_weak.expired() ) { continue; } shared_ptr<IfcRelAggregates> decomposes_ptr(decomposes_weak); shared_ptr<IfcObjectDefinition>& relating_object = decomposes_ptr->m_RelatingObject; if( relating_object ) { if( dynamic_pointer_cast<IfcCurtainWall>(relating_object) || dynamic_pointer_cast<IfcWindow>(relating_object) ) { representation_switch->setStateSet(m_glass_stateset); SceneGraphUtils::setMaterialAlpha(representation_switch, 0.6f, true); } } } } map_representation_switches.insert( std::make_pair( representation_id, representation_switch ) ); } } // TODO: if no color or material is given, set color 231/219/169 for walls, 140/140/140 for slabs } /*\brief method convertToOSG: Creates geometry for OpenSceneGraph from given ProductShapeData. \param[out] parent_group Group to append the geometry. 
**/ void convertToOSG( const std::map<std::string, shared_ptr<ProductShapeData> >& map_shape_data, osg::ref_ptr<osg::Switch> parent_group ) { progressTextCallback( L"Converting geometry to OpenGL format ..." ); progressValueCallback( 0, "scenegraph" ); m_map_entity_guid_to_switch.clear(); m_map_representation_id_to_switch.clear(); m_vec_existing_statesets.clear(); shared_ptr<ProductShapeData> ifc_project_data; std::vector<shared_ptr<ProductShapeData> > vec_products; for( auto it = map_shape_data.begin(); it != map_shape_data.end(); ++it ) { shared_ptr<ProductShapeData> shape_data = it->second; if( shape_data ) { vec_products.push_back( shape_data ); } } // create geometry for for each IfcProduct independently, spatial structure will be resolved later std::map<std::string, osg::ref_ptr<osg::Switch> >* map_entity_guid = &m_map_entity_guid_to_switch; std::map<int, osg::ref_ptr<osg::Switch> >* map_representations = &m_map_representation_id_to_switch; const int num_products = (int)vec_products.size(); #ifdef ENABLE_OPENMP Mutex writelock_map; Mutex writelock_ifc_project; Mutex writelock_message_callback; #pragma omp parallel firstprivate(num_products) shared(map_entity_guid, map_representations) { // time for one product may vary significantly, so schedule not so many #pragma omp for schedule(dynamic,40) #endif for( int i = 0; i < num_products; ++i ) { shared_ptr<ProductShapeData>& shape_data = vec_products[i]; weak_ptr<IfcObjectDefinition>& ifc_object_def_weak = shape_data->m_ifc_object_definition; if( ifc_object_def_weak.expired() ) { continue; } shared_ptr<IfcObjectDefinition> ifc_object_def(shape_data->m_ifc_object_definition); shared_ptr<IfcProject> ifc_project = dynamic_pointer_cast<IfcProject>(ifc_object_def); if (ifc_project) { #ifdef ENABLE_OPENMP ScopedLock scoped_lock(writelock_ifc_project); #endif ifc_project_data = shape_data; } shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def); if (!ifc_product) { continue; } 
std::stringstream thread_err; if( dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_product) ) { // geometry will be created in method subtractOpenings continue; } if( !ifc_product->m_Representation ) { continue; } const int product_id = ifc_product->m_entity_id; std::string product_guid; std::map<int, osg::ref_ptr<osg::Switch> > map_representation_switches; try { convertProductShapeToOSG( shape_data, map_representation_switches ); } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { thread_err << e.what(); } catch( carve::exception& e ) { thread_err << e.str(); } catch( std::exception& e ) { thread_err << e.what(); } catch( ... ) { thread_err << "undefined error, product id " << product_id; } if (ifc_product->m_GlobalId) { std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX; product_guid = converterX.to_bytes(ifc_product->m_GlobalId->m_value); } if( map_representation_switches.size() > 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); osg::ref_ptr<osg::MatrixTransform> product_transform = new osg::MatrixTransform(); product_transform->setMatrix( convertMatrixToOSG( shape_data->getTransform() ) ); product_switch->addChild( product_transform ); std::stringstream strs_product_switch_name; strs_product_switch_name << product_guid << ":" << ifc_product->className() << " group"; product_switch->setName( strs_product_switch_name.str().c_str() ); for( auto it_map = map_representation_switches.begin(); it_map != map_representation_switches.end(); ++it_map ) { osg::ref_ptr<osg::Switch>& repres_switch = it_map->second; product_transform->addChild( repres_switch ); } // apply statesets if there are any const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances = shape_data->getAppearances(); if( vec_product_appearances.size() > 0 ) { applyAppearancesToGroup( vec_product_appearances, product_switch ); } #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_map ); #endif 
map_entity_guid->insert(std::make_pair(product_guid, product_switch)); map_representations->insert( map_representation_switches.begin(), map_representation_switches.end() ); } if( thread_err.tellp() > 0 ) { #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_message_callback ); #endif messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } // progress callback double progress = (double)i / (double)num_products; if( progress - m_recent_progress > 0.02 ) { #ifdef ENABLE_OPENMP if( omp_get_thread_num() == 0 ) #endif { // leave 10% of progress to openscenegraph internals progressValueCallback( progress*0.9, "scenegraph" ); m_recent_progress = progress; } } } #ifdef ENABLE_OPENMP } // implicit barrier #endif try { // now resolve spatial structure if( ifc_project_data ) { resolveProjectStructure( ifc_project_data, parent_group ); } } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( std::exception& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( ... 
) { messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } progressValueCallback( 0.9, "scenegraph" ); } void addNodes( const std::map<std::string, shared_ptr<BuildingObject> >& map_shape_data, osg::ref_ptr<osg::Switch>& target_group ) { // check if there are entities that are not in spatial structure if( !target_group ) { target_group = new osg::Switch(); } for( auto it_product_shapes = map_shape_data.begin(); it_product_shapes != map_shape_data.end(); ++it_product_shapes ) { std::string product_guid = it_product_shapes->first; auto it_find = m_map_entity_guid_to_switch.find(product_guid); if( it_find != m_map_entity_guid_to_switch.end() ) { osg::ref_ptr<osg::Switch>& sw = it_find->second; if( sw ) { target_group->addChild( sw ); } } } } void resolveProjectStructure( const shared_ptr<ProductShapeData>& product_data, osg::ref_ptr<osg::Switch> group ) { if( !product_data ) { return; } if( product_data->m_ifc_object_definition.expired() ) { return; } shared_ptr<IfcObjectDefinition> ifc_object_def(product_data->m_ifc_object_definition); if (!ifc_object_def) { return; } std::string guid; if (ifc_object_def->m_GlobalId) { std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX; guid = converterX.to_bytes(ifc_object_def->m_GlobalId->m_value); } if( SceneGraphUtils::inParentList(guid, group ) ) { messageCallback( "Cycle in project structure detected", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__, ifc_object_def.get() ); return; } const std::vector<shared_ptr<ProductShapeData> >& vec_children = product_data->getChildren(); for( size_t ii = 0; ii < vec_children.size(); ++ii ) { const shared_ptr<ProductShapeData>& child_product_data = vec_children[ii]; if( !child_product_data ) { continue; } osg::ref_ptr<osg::Switch> group_subparts = new osg::Switch(); if( !child_product_data->m_ifc_object_definition.expired() ) { shared_ptr<IfcObjectDefinition> child_obj_def( child_product_data->m_ifc_object_definition ); std::stringstream 
group_subparts_name; group_subparts_name << "#" << child_obj_def->m_entity_id << "="; group_subparts_name << child_obj_def->className(); group_subparts->setName( group_subparts_name.str().c_str() ); } group->addChild( group_subparts ); resolveProjectStructure( child_product_data, group_subparts ); } auto it_product_map = m_map_entity_guid_to_switch.find(guid); if( it_product_map != m_map_entity_guid_to_switch.end() ) { const osg::ref_ptr<osg::Switch>& product_switch = it_product_map->second; if( product_switch ) { group->addChild( product_switch ); } } else { if( group->getNumChildren() == 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); group->addChild( product_switch ); std::stringstream switch_name; switch_name << guid << ":" << ifc_object_def->className(); product_switch->setName( switch_name.str().c_str() ); } } } void clearAppearanceCache() { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif m_vec_existing_statesets.clear(); } void convertToOSGStateSet( const shared_ptr<AppearanceData>& appearence, osg::ref_ptr<osg::StateSet>& target_stateset ) { if( !appearence ) { return; } const float shininess = appearence->m_shininess; const float transparency = appearence->m_transparency; const bool set_transparent = appearence->m_set_transparent; const float color_ambient_r = appearence->m_color_ambient.r(); const float color_ambient_g = appearence->m_color_ambient.g(); const float color_ambient_b = appearence->m_color_ambient.b(); const float color_ambient_a = appearence->m_color_ambient.a(); const float color_diffuse_r = appearence->m_color_diffuse.r(); const float color_diffuse_g = appearence->m_color_diffuse.g(); const float color_diffuse_b = appearence->m_color_diffuse.b(); const float color_diffuse_a = appearence->m_color_diffuse.a(); const float color_specular_r = appearence->m_color_specular.r(); const float color_specular_g = appearence->m_color_specular.g(); const float color_specular_b = 
appearence->m_color_specular.b(); const float color_specular_a = appearence->m_color_specular.a(); if( m_enable_stateset_caching ) { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif for( size_t i = 0; i<m_vec_existing_statesets.size(); ++i ) { const osg::ref_ptr<osg::StateSet> stateset_existing = m_vec_existing_statesets[i]; if( !stateset_existing.valid() ) { continue; } osg::ref_ptr<osg::Material> mat_existing = (osg::Material*)stateset_existing->getAttribute( osg::StateAttribute::MATERIAL ); if( !mat_existing ) { continue; } // compare osg::Vec4f color_ambient_existing = mat_existing->getAmbient( osg::Material::FRONT_AND_BACK ); if( fabs( color_ambient_existing.r() - color_ambient_r ) > 0.03 ) break; if( fabs( color_ambient_existing.g() - color_ambient_g ) > 0.03 ) break; if( fabs( color_ambient_existing.b() - color_ambient_b ) > 0.03 ) break; if( fabs( color_ambient_existing.a() - color_ambient_a ) > 0.03 ) break; osg::Vec4f color_diffuse_existing = mat_existing->getDiffuse( osg::Material::FRONT_AND_BACK ); if( fabs( color_diffuse_existing.r() - color_diffuse_r ) > 0.03 ) break; if( fabs( color_diffuse_existing.g() - color_diffuse_g ) > 0.03 ) break; if( fabs( color_diffuse_existing.b() - color_diffuse_b ) > 0.03 ) break; if( fabs( color_diffuse_existing.a() - color_diffuse_a ) > 0.03 ) break; osg::Vec4f color_specular_existing = mat_existing->getSpecular( osg::Material::FRONT_AND_BACK ); if( fabs( color_specular_existing.r() - color_specular_r ) > 0.03 ) break; if( fabs( color_specular_existing.g() - color_specular_g ) > 0.03 ) break; if( fabs( color_specular_existing.b() - color_specular_b ) > 0.03 ) break; if( fabs( color_specular_existing.a() - color_specular_a ) > 0.03 ) break; float shininess_existing = mat_existing->getShininess( osg::Material::FRONT_AND_BACK ); if( fabs( shininess_existing - shininess ) > 0.03 ) break; bool blend_on_existing = stateset_existing->getMode( GL_BLEND ) == osg::StateAttribute::ON; if( 
blend_on_existing != set_transparent ) break; bool transparent_bin = stateset_existing->getRenderingHint() == osg::StateSet::TRANSPARENT_BIN; if( transparent_bin != set_transparent ) break; // if we get here, appearance is same as existing state set // TODO: block this re-used stateset for merging, or prevent merged statesets from being re-used target_stateset = stateset_existing; return; } } osg::Vec4f ambientColor( color_ambient_r, color_ambient_g, color_ambient_b, transparency ); osg::Vec4f diffuseColor( color_diffuse_r, color_diffuse_g, color_diffuse_b, transparency ); osg::Vec4f specularColor( color_specular_r, color_specular_g, color_specular_b, transparency ); // TODO: material caching and re-use osg::ref_ptr<osg::Material> mat = new osg::Material(); if( !mat ){ throw OutOfMemoryException(); } mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setDiffuse( osg::Material::FRONT_AND_BACK, diffuseColor ); mat->setSpecular( osg::Material::FRONT_AND_BACK, specularColor ); mat->setShininess( osg::Material::FRONT_AND_BACK, shininess ); mat->setColorMode( osg::Material::SPECULAR ); target_stateset = new osg::StateSet(); if( !target_stateset ){ throw OutOfMemoryException(); } target_stateset->setAttribute( mat, osg::StateAttribute::ON ); if( appearence->m_set_transparent ) { mat->setTransparency( osg::Material::FRONT_AND_BACK, transparency ); target_stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); target_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } if( appearence->m_specular_exponent != 0.f ) { //osg::ref_ptr<osgFX::SpecularHighlights> spec_highlights = new osgFX::SpecularHighlights(); //spec_highlights->setSpecularExponent( spec->m_value ); // todo: add to scenegraph } if( m_enable_stateset_caching ) { m_vec_existing_statesets.push_back( target_stateset ); } } };
gemv_x_dia_trans.c
#include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * y := alpha * op(A) * x + beta * y for a matrix A in DIA (diagonal) storage,
 * transposed kernel: contributions of row `r` of A go into y[col] (length n).
 *
 * Strategy: each OpenMP thread accumulates into its own zero-initialized buffer
 * of length n (no write conflicts between diagonals mapped to the same thread),
 * then a second parallel pass reduces the per-thread buffers into y together
 * with the beta scaling.
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    const ALPHA_INT thread_num = alpha_get_thread_num();

    /* per-thread accumulators, zero-initialized.
       calloc replaces the original malloc+memset pair: one call, same all-zero bytes.
       NOTE(review): allocation results are not checked, mirroring the original code;
       also buffers are released with alpha_free below although they come from
       calloc/malloc — verify that alpha_free wraps free. */
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
    for (int i = 0; i < thread_num; ++i)
    {
        tmp[i] = (ALPHA_Number*)calloc(n, sizeof(ALPHA_Number));
    }

    const ALPHA_INT diags = A->ndiag;
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < diags; ++i)
    {
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT dis = A->distance[i];              /* diagonal offset (col - row) */
        const ALPHA_INT row_start = alpha_max(0, -dis);
        const ALPHA_INT col_start = alpha_max(0, dis);
        /* number of stored entries on this diagonal */
        const ALPHA_INT nnz = (m - row_start) < (n - col_start) ? (m - row_start) : (n - col_start);
        const ALPHA_INT start = i * A->lval;               /* base of diagonal i in A->values */
        for (ALPHA_INT j = 0; j < nnz; ++j)
        {
            ALPHA_Number v;
            alpha_mul(v, alpha, A->values[start + row_start + j]);
            /* transposed: A(row,col) contributes x[row] to accumulator[col] */
            alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]);
        }
    }

    /* reduce per-thread accumulators into y, applying beta */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < n; ++i)
    {
        alpha_mul(y[i], y[i], beta);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);
        }
    }

    /* BUGFIX(perf): this cleanup loop was wrapped in `#pragma omp parallel for` —
       spawning a parallel region just to free thread_num pointers is pure
       overhead; freeing serially is simpler and at least as fast. */
    for (int i = 0; i < thread_num; ++i)
    {
        alpha_free(tmp[i]);
    }
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point: dispatches to the OpenMP implementation. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y)
{
    return ONAME_omp(alpha, A, x, beta, y);
}
argon2_base.h
#pragma once #include <cstdint> #include <stdexcept> #include <new> #include <cstdlib> #include <memory> #include "argonishche.h" #include "internal/blake2b/blake2b.h" namespace argonishche { const uint32_t ARGON2_PREHASH_DIGEST_LENGTH = 64; const uint32_t ARGON2_SECRET_MAX_LENGTH = 32; const uint32_t ARGON2_PREHASH_SEED_LENGTH = 72; const uint32_t ARGON2_BLOCK_SIZE = 1024; const uint32_t ARGON2_QWORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 8; const uint32_t ARGON2_OWORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 16; const uint32_t ARGON2_HWORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 32; const uint32_t ARGON2_ADDRESSES_IN_BLOCK = 128; const uint32_t ARGON2_SYNC_POINTS = 4; const uint32_t ARGON2_SALT_MIN_LEN = 8; const uint32_t ARGON2_MIN_OUTLEN = 4; struct block { uint64_t v[ARGON2_QWORDS_IN_BLOCK]; }; template <InstructionSet instructionSet, uint32_t mcost, uint32_t threads> class Argon2 : public Argon2Base { public: Argon2(Argon2Type atype, uint32_t tcost, const uint8_t *key, uint32_t keylen) : secretlen__(keylen), tcost__(tcost), atype__(atype) { if(secretlen__) memcpy(secret__, key, keylen); } virtual ~Argon2() override { if (secretlen__) { secure_zero_memory__(secret__, secretlen__); secretlen__ = 0; } } virtual void Hash(const uint8_t *pwd, uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen, uint8_t *out, uint32_t outlen, const uint8_t *aad = nullptr, uint32_t aadlen = 0) const override { std::unique_ptr<block[]> buffer(new block[memory_blocks__]); internal_hash__(buffer.get(), pwd, pwdlen, salt, saltlen, out, outlen, aad, aadlen); } virtual bool Verify(const uint8_t *pwd, uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen, const uint8_t *hash, uint32_t hashlen, const uint8_t *aad = nullptr, uint32_t aadlen = 0) const override { std::unique_ptr<uint8_t[]> hash_result(new uint8_t[hashlen]); Hash(pwd, pwdlen, salt, saltlen, hash_result.get(), hashlen, aad, aadlen); return secure_compare__(hash, hash_result.get(), hashlen); } virtual void HashWithCustomMemory(uint8_t* memory, 
size_t mlen, const uint8_t *pwd, uint32_t pwdlen, const uint8_t* salt, uint32_t saltlen, uint8_t* out, uint32_t outlen, const uint8_t* aad = nullptr, uint32_t aadlen = 0) const override { if(memory == nullptr || mlen < sizeof(block) * memory_blocks__) throw std::runtime_error("memory is null or its size is not enough"); internal_hash__((block*)memory, pwd, pwdlen, salt, saltlen, out, outlen, aad, aadlen); } virtual bool VerifyWithCustomMemory(uint8_t* memory, size_t mlen, const uint8_t *pwd, uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen, const uint8_t *hash, uint32_t hashlen, const uint8_t *aad = nullptr, uint32_t aadlen = 0) const override { std::unique_ptr<uint8_t[]> hash_result(new uint8_t[hashlen]); HashWithCustomMemory(memory, mlen, pwd, pwdlen, salt, saltlen, hash_result.get(), hashlen, aad, aadlen); return secure_compare__(hash_result.get(), hash, hashlen); } virtual size_t GetMemorySize() const override { return memory_blocks__ * sizeof(block); } protected: /* Constants */ uint8_t secret__[ARGON2_SECRET_MAX_LENGTH] = {0}; uint32_t secretlen__ = 0; uint32_t tcost__; Argon2Type atype__; static constexpr uint32_t lanes__ = threads; static constexpr uint32_t memory_blocks__ = (mcost >= 2 * ARGON2_SYNC_POINTS * lanes__) ? 
(mcost - mcost % (lanes__ * ARGON2_SYNC_POINTS)) : 2 * ARGON2_SYNC_POINTS * lanes__; static constexpr uint32_t segment_length__ = memory_blocks__ / (lanes__ * ARGON2_SYNC_POINTS); static constexpr uint32_t lane_length__ = segment_length__ * ARGON2_SYNC_POINTS; protected: /* Prototypes */ virtual void fill_block__(const block *prev_block, const block *ref_block, block *next_block, bool with_xor) const = 0; virtual void copy_block__(block *dst, const block *src) const = 0; virtual void xor_block__(block *dst, const block *src) const = 0; protected: /* Static functions */ static bool secure_compare__(const uint8_t* buffer1, const uint8_t* buffer2, uint32_t len) { bool result = true; for(uint32_t i = 0; i < len; ++i) { result &= (buffer1[i] == buffer2[i]); } return result; } static void secure_zero_memory__(void *src, size_t len) { static void *(*const volatile memset_v)(void *, int, size_t) = &memset; memset_v(src, 0, len); } static void store32__(uint32_t value, void *mem) { *((uint32_t *) mem) = value; } static void blake2b_hash64__(uint8_t out[BLAKE2B_OUTBYTES], const uint8_t in[BLAKE2B_OUTBYTES]) { Blake2B<instructionSet> hash(BLAKE2B_OUTBYTES); hash.Update(in, BLAKE2B_OUTBYTES); hash.Final(out, BLAKE2B_OUTBYTES); } static void argon2_expand_blockhash__(uint8_t expanded[ARGON2_BLOCK_SIZE], const uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]) { uint8_t out_buffer[BLAKE2B_OUTBYTES]; uint8_t in_buffer[BLAKE2B_OUTBYTES]; const uint32_t HALF_OUT_BYTES = BLAKE2B_OUTBYTES / 2; const uint32_t HASH_BLOCKS_COUNT = ((ARGON2_BLOCK_SIZE / HALF_OUT_BYTES)); Blake2B<instructionSet> hash(BLAKE2B_OUTBYTES); hash.Update(ARGON2_BLOCK_SIZE); hash.Update(blockhash, ARGON2_PREHASH_SEED_LENGTH); hash.Final(out_buffer, BLAKE2B_OUTBYTES); memcpy(expanded, out_buffer, HALF_OUT_BYTES); for (uint32_t i = 1; i < HASH_BLOCKS_COUNT - 2; ++i) { memcpy(in_buffer, out_buffer, BLAKE2B_OUTBYTES); blake2b_hash64__(out_buffer, in_buffer); memcpy(expanded + (i * HALF_OUT_BYTES), out_buffer, 
HALF_OUT_BYTES); } blake2b_hash64__(in_buffer, out_buffer); memcpy(expanded + HALF_OUT_BYTES * (HASH_BLOCKS_COUNT - 2), in_buffer, BLAKE2B_OUTBYTES); } static void blake2b_long__(uint8_t* out, uint32_t outlen, const uint8_t* in, uint32_t inlen) { if(outlen < BLAKE2B_OUTBYTES) { Blake2B<instructionSet> hash(outlen); hash.Update(outlen); hash.Update(in, inlen); hash.Final(out, outlen); } else { uint8_t out_buffer[BLAKE2B_OUTBYTES]; uint8_t in_buffer[BLAKE2B_OUTBYTES]; uint32_t toproduce = outlen - BLAKE2B_OUTBYTES / 2; Blake2B<instructionSet> hash(BLAKE2B_OUTBYTES); hash.Update(outlen); hash.Update(in, inlen); hash.Final(out_buffer, BLAKE2B_OUTBYTES); memcpy(out, out_buffer, BLAKE2B_OUTBYTES / 2); out += BLAKE2B_OUTBYTES / 2; while(toproduce > BLAKE2B_OUTBYTES) { memcpy(in_buffer, out_buffer, BLAKE2B_OUTBYTES); Blake2B<instructionSet> hash(BLAKE2B_OUTBYTES); hash.Update(in_buffer, BLAKE2B_OUTBYTES); hash.Final(out_buffer, BLAKE2B_OUTBYTES); memcpy(out, out_buffer, BLAKE2B_OUTBYTES / 2); out += BLAKE2B_OUTBYTES / 2; toproduce -= BLAKE2B_OUTBYTES / 2; } memcpy(in_buffer, out_buffer, BLAKE2B_OUTBYTES); { Blake2B<instructionSet> hash(BLAKE2B_OUTBYTES); hash.Update(in_buffer, toproduce); hash.Final(out_buffer, BLAKE2B_OUTBYTES); memcpy(out, out_buffer, toproduce); } } } static void init_block_value__(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); } protected: /* Functions */ void internal_hash__(block* memory, const uint8_t *pwd, uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen, uint8_t *out, uint32_t outlen, const uint8_t *aad, uint32_t aadlen) const { /* * all parameters checks are in proxy objects */ initialize__(memory, outlen, pwd, pwdlen, salt, saltlen, aad, aadlen); fill_memory_blocks__(memory); finalize__(memory, out, outlen); } void initial_hash__(uint8_t blockhash[ARGON2_PREHASH_DIGEST_LENGTH], uint32_t outlen, const uint8_t *pwd, uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen, const uint8_t *aad, uint32_t aadlen) const { 
Blake2B<instructionSet> hash(ARGON2_PREHASH_DIGEST_LENGTH); /* lanes, but lanes == threads */ hash.Update(lanes__); /* outlen */ hash.Update(outlen); /* m_cost */ hash.Update(mcost); /* t_cost */ hash.Update(tcost__); /* version */ hash.Update(0x00000013); hash.Update((uint32_t)atype__); /* pwdlen */ hash.Update(pwdlen); /* pwd */ hash.Update(pwd, pwdlen); /* saltlen */ hash.Update(saltlen); /* salt */ if(saltlen) hash.Update(salt, saltlen); /* secret */ hash.Update(secretlen__); if (secretlen__) hash.Update((void *) secret__, secretlen__); /* aadlen */ hash.Update(aadlen); if (aadlen) hash.Update((void *) aad, aadlen); hash.Final(blockhash, ARGON2_PREHASH_DIGEST_LENGTH); } void fill_first_blocks__(block* blocks, uint8_t *blockhash) const { for (uint32_t l = 0; l < lanes__; l++) { /* fill the first block of the lane */ store32__(l, blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4); store32__(0, blockhash + ARGON2_PREHASH_DIGEST_LENGTH); argon2_expand_blockhash__((uint8_t*)&(blocks[l * lane_length__]), blockhash); /* fill the second block of the lane */ store32__(1, blockhash + ARGON2_PREHASH_DIGEST_LENGTH); argon2_expand_blockhash__((uint8_t*)&(blocks[l * lane_length__ + 1]), blockhash); } } /* The 'if' will be optimized out as the number of threads is known at the compile time */ void fill_memory_blocks__(block* memory) const { for (uint32_t t = 0; t < tcost__; ++t) { for (uint32_t s = 0; s < ARGON2_SYNC_POINTS; ++s) { #ifdef _OPENMP #pragma omp parallel for #endif for (uint32_t l = 0; l < lanes__; ++l) { fill_segment__(memory, t, l, s); } } } } void initialize__(block *memory, uint32_t outlen, const uint8_t *pwd, uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen, const uint8_t *aad, uint32_t aadlen) const { uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; initial_hash__(blockhash, outlen, pwd, pwdlen, salt, saltlen, aad, aadlen); fill_first_blocks__(memory, blockhash); } uint32_t compute_reference_area__(uint32_t pass, uint32_t slice, uint32_t index, bool 
same_lane) const { uint32_t pass_val = pass == 0 ? (slice * segment_length__) : (lane_length__ - segment_length__); return same_lane ? pass_val + (index - 1) : pass_val + (index == 0 ? -1 : 0); } uint32_t index_alpha__(uint32_t pass, uint32_t slice, uint32_t index, uint32_t pseudo_rand, bool same_lane) const { uint32_t reference_area_size = compute_reference_area__(pass, slice, index, same_lane); uint64_t relative_position = pseudo_rand; relative_position = relative_position * relative_position >> 32; relative_position = reference_area_size - 1 - (reference_area_size * relative_position >> 32); uint32_t start_position = 0; if (pass != 0) start_position = (slice == ARGON2_SYNC_POINTS - 1) ? 0 : (slice + 1) * segment_length__; return (uint32_t)((start_position + relative_position) % lane_length__); } void next_addresses(block *address_block, block *input_block, const block *zero_block) const { input_block->v[6]++; fill_block__(zero_block, input_block, address_block, false); fill_block__(zero_block, address_block, address_block, false); } void finalize__(const block* memory, uint8_t* out, uint32_t outlen) const { block blockhash; copy_block__(&blockhash, memory + lane_length__ - 1); /* XOR the last blocks */ for (uint32_t l = 1; l < lanes__; ++l) { uint32_t last_block_in_lane = l * lane_length__ + (lane_length__ - 1); xor_block__(&blockhash, memory + last_block_in_lane); } blake2b_long__(out, outlen, (uint8_t*)blockhash.v, ARGON2_BLOCK_SIZE); } /* The switch will be optimized out by the compiler as the type is known at the compile time */ void fill_segment__(block *memory, uint32_t pass, uint32_t lane, uint32_t slice) const { switch (atype__) { case Argon2Type::Argon2_d: fill_segment_d__(memory, pass, lane, slice); return; case Argon2Type::Argon2_i: fill_segment_i__(memory, pass, lane, slice, Argon2Type::Argon2_i); return; case Argon2Type::Argon2_id: if(pass == 0 && slice < ARGON2_SYNC_POINTS / 2) fill_segment_i__(memory, pass, lane, slice, Argon2Type::Argon2_id); 
else fill_segment_d__(memory, pass, lane, slice); return; } } void fill_segment_d__(block *memory, uint32_t pass, uint32_t lane, uint32_t slice) const { uint32_t starting_index = (pass == 0 && slice == 0) ? 2 : 0; uint32_t curr_offset = lane * lane_length__ + slice * segment_length__ + starting_index; uint32_t prev_offset = curr_offset + ((curr_offset % lane_length__ == 0) ? lane_length__ : 0) - 1; for (uint32_t i = starting_index; i < segment_length__; ++i, ++curr_offset, ++prev_offset) { if (curr_offset % lane_length__ == 1) { prev_offset = curr_offset - 1; } uint64_t pseudo_rand = memory[prev_offset].v[0]; uint64_t ref_lane = (pass == 0 && slice == 0) ? lane : (((pseudo_rand >> 32)) % lanes__); uint64_t ref_index = index_alpha__(pass, slice, i, (uint32_t)(pseudo_rand & 0xFFFFFFFF), ref_lane == lane); block* ref_block = memory + lane_length__ * ref_lane + ref_index; fill_block__(memory + prev_offset, ref_block, memory + curr_offset, pass != 0); } } void fill_segment_i__(block *memory, uint32_t pass, uint32_t lane, uint32_t slice, Argon2Type atp) const { block address_block, input_block, zero_block; init_block_value__(&zero_block, 0); init_block_value__(&input_block, 0); input_block.v[0] = pass; input_block.v[1] = lane; input_block.v[2] = slice; input_block.v[3] = memory_blocks__; input_block.v[4] = tcost__; input_block.v[5] = (uint64_t)atp; uint32_t starting_index = 0; if (pass == 0 && slice == 0) { starting_index = 2; next_addresses(&address_block, &input_block, &zero_block); } uint32_t curr_offset = lane * lane_length__ + slice * segment_length__ + starting_index; uint32_t prev_offset = curr_offset + ((curr_offset % lane_length__ == 0) ? 
lane_length__ : 0) - 1; for (uint32_t i = starting_index; i < segment_length__; ++i, ++curr_offset, ++prev_offset) { if (curr_offset % lane_length__ == 1) { prev_offset = curr_offset - 1; } if (i % ARGON2_ADDRESSES_IN_BLOCK == 0) { next_addresses(&address_block, &input_block, &zero_block); } uint64_t pseudo_rand = address_block.v[i % ARGON2_ADDRESSES_IN_BLOCK]; uint64_t ref_lane = (pass == 0 && slice == 0)? lane : (((pseudo_rand >> 32)) % lanes__); uint64_t ref_index = index_alpha__(pass, slice, i, (uint32_t)(pseudo_rand & 0xFFFFFFFF), ref_lane == lane); block* ref_block = memory + lane_length__ * ref_lane + ref_index; fill_block__(memory + prev_offset, ref_block, memory + curr_offset, pass != 0); } } }; }
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
/* Helper macros used by the PLUTO/CLooG-generated tiled loop nest below. */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE: Y is modified in place while normalising the microsecond carry, so
 * callers must not rely on *y afterwards.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates a double-buffered 3D grid, runs a tiled
 * (PLUTO/CLooG-generated) order-1 7-point stencil TESTS times, and reports
 * the per-run and minimum wall-clock time.
 *
 * Usage: prog Nx Ny Nz Nt  (interior sizes; +2 adds the halo layer).
 * NOTE(review): Nx/Ny/Nz/Nt stay uninitialized when fewer than 4 args are
 * given -- undefined behavior downstream; confirm callers always pass them.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time planes (A[0], A[1]) of an Nz x Ny x Nx grid, allocated as
   * jagged arrays so A[t%2][i][j][k] indexing works below. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 16;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;  /* center-point weight */
  const double beta = 0.0765;   /* neighbor weight */

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* (An inlined glibc <stdc-predef.h> license/commentary block appeared
     * here in the original generated file; it contained no code and has
     * been condensed to this note.) */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

/* Start of CLooG code */
/* Time-skewed, tiled traversal generated by PLUTO/CLooG; loop bounds are
 * machine-generated and must not be hand-edited. t5 is the time step;
 * (-t5+t6, -t5+t7, -t5+t8) recover the spatial (z,y,x) coordinates. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,16);t1++) {
    lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
    ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(32*t2-Nz-12,16)),t1);t3<=min(min(min(floord(Nt+Ny-4,16),floord(16*t1+Ny+29,16)),floord(32*t2+Ny+28,16)),floord(32*t1-32*t2+Nz+Ny+27,16));t3++) {
        for (t4=max(max(max(0,ceild(t1-15,16)),ceild(32*t2-Nz-252,256)),ceild(16*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(16*t1+Nx+29,256)),floord(32*t2+Nx+28,256)),floord(16*t3+Nx+12,256)),floord(32*t1-32*t2+Nz+Nx+27,256));t4++) {
          for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),16*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),16*t3+14),256*t4+254),32*t1-32*t2+Nz+29);t5++) {
            for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                lbv=max(256*t4,t5+1);
                ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* 7-point update: center * alpha + sum of 6 face
                   * neighbors * beta, written into the other time plane. */
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
       for(j=0;j<Ny;j++){
         free(A[0][i][j]);
         free(A[1][i][j]);
       }
       free(A[0][i]);
       free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
  */
  return 0;
}
3d25pt_var.c
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE: Y is modified in place while normalising the microsecond carry.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: 25-point (radius-4, axis-symmetric) variable-coefficient
 * stencil. A holds two time planes; coef holds 13 coefficient grids (one
 * center weight + 12 axis/offset weights, each shared by the +/- neighbor
 * pair at that offset).
 *
 * Usage: prog Nx Ny Nz Nt  (interior sizes; +8 adds the 4-cell halo on each
 * side). NOTE(review): Nx/Ny/Nz/Nt stay uninitialized when fewer than 4
 * args are given -- confirm callers always pass them.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 32;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    /* Radius-4 axis-symmetric update: coef[0] weighs the center; coef[d]
     * and coef[d+3]/coef[d+6]/coef[d+9] weigh the +/- pairs at offsets
     * 1..4 along z, y, x respectively. The 4-cell halo is never written. */
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
              coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
              coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
              coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
              coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
              coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
              coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
              coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
              coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
update_ops_named_CNOT.c
#include <stddef.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _USE_SIMD
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#endif

//void CNOT_gate_old_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void CNOT_gate_old_parallel(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void CNOT_gate_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);

/* Apply a CNOT gate to the state vector `state` of dimension `dim`,
 * dispatching to the SIMD and/or OpenMP variant according to the build
 * flags; small states (dim < 2^13) stay single-threaded to avoid
 * parallelisation overhead. */
void CNOT_gate(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	//CNOT_gate_old_single(control_qubit_index, target_qubit_index, state, dim);
	//CNOT_gate_old_parallel(control_qubit_index, target_qubit_index, state, dim);
	//CNOT_gate_single(control_qubit_index, target_qubit_index, state, dim);
	//CNOT_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim);
	//CNOT_gate_single_simd(control_qubit_index, target_qubit_index, state, dim);
	//CNOT_gate_parallel(control_qubit_index, target_qubit_index, state, dim);
	//return;

#ifdef _USE_SIMD
#ifdef _OPENMP
	UINT threshold = 13;
	if (dim < (((ITYPE)1) << threshold)) {
		CNOT_gate_single_simd(control_qubit_index, target_qubit_index, state, dim);
	}
	else {
		CNOT_gate_parallel_simd(control_qubit_index, target_qubit_index, state, dim);
	}
#else
	CNOT_gate_single_simd(control_qubit_index, target_qubit_index, state, dim);
#endif
#else
#ifdef _OPENMP
	UINT threshold = 13;
	if (dim < (((ITYPE)1) << threshold)) {
		CNOT_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim);
	}
	else {
		CNOT_gate_parallel_unroll(control_qubit_index, target_qubit_index, state, dim);
	}
#else
	CNOT_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim);
#endif
#endif
}

/* Single-threaded CNOT. Enumerates the dim/4 basis indices that have the
 * control bit set and the target bit clear by splitting the loop counter
 * into low/mid/high fields around the two qubit bit positions, then swaps
 * each such amplitude with its target-flipped partner. Special cases:
 * target qubit 0 swaps adjacent amplitudes; otherwise pairs of adjacent
 * indices are processed per iteration (2x unroll). */
void CNOT_gate_single_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 4;
	const ITYPE target_mask = 1ULL << target_qubit_index;
	const ITYPE control_mask = 1ULL << control_qubit_index;
	const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
	const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
	const ITYPE low_mask = min_qubit_mask - 1;
	const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
	const ITYPE high_mask = ~(max_qubit_mask - 1);
	ITYPE state_index = 0;
	if (target_qubit_index == 0) {
		// swap neighboring two basis
		for (state_index = 0; state_index < loop_dim; ++state_index) {
			ITYPE basis_index = ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			CTYPE temp = state[basis_index];
			state[basis_index] = state[basis_index + 1];
			state[basis_index + 1] = temp;
		}
	}
	else if (control_qubit_index == 0) {
		// no neighboring swap
		for (state_index = 0; state_index < loop_dim; ++state_index) {
			ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			ITYPE basis_index_1 = basis_index_0 + target_mask;
			CTYPE temp = state[basis_index_0];
			state[basis_index_0] = state[basis_index_1];
			state[basis_index_1] = temp;
		}
	}
	else {
		// a,a+1 is swapped to a^m, a^m+1, respectively
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			ITYPE basis_index_1 = basis_index_0 + target_mask;
			CTYPE temp0 = state[basis_index_0];
			CTYPE temp1 = state[basis_index_0 + 1];
			state[basis_index_0] = state[basis_index_1];
			state[basis_index_0 + 1] = state[basis_index_1 + 1];
			state[basis_index_1] = temp0;
			state[basis_index_1 + 1] = temp1;
		}
	}
}

#ifdef _OPENMP
/* OpenMP variant of CNOT_gate_single_unroll: identical index logic with
 * each branch's loop parallelised. Iterations touch disjoint amplitude
 * pairs, so no synchronisation is required. */
void CNOT_gate_parallel_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 4;
	const ITYPE target_mask = 1ULL << target_qubit_index;
	const ITYPE control_mask = 1ULL << control_qubit_index;
	const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
	const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
	const ITYPE low_mask = min_qubit_mask - 1;
	const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
	const ITYPE high_mask = ~(max_qubit_mask - 1);
	ITYPE state_index = 0;
	if (target_qubit_index == 0) {
		// swap neighboring two basis
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; ++state_index) {
			ITYPE basis_index = ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			CTYPE temp = state[basis_index];
			state[basis_index] = state[basis_index + 1];
			state[basis_index + 1] = temp;
		}
	}
	else if (control_qubit_index == 0) {
		// no neighboring swap
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; ++state_index) {
			ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			ITYPE basis_index_1 = basis_index_0 + target_mask;
			CTYPE temp = state[basis_index_0];
			state[basis_index_0] = state[basis_index_1];
			state[basis_index_1] = temp;
		}
	}
	else {
		// a,a+1 is swapped to a^m, a^m+1, respectively
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			ITYPE basis_index_1 = basis_index_0 + target_mask;
			CTYPE temp0 = state[basis_index_0];
			CTYPE temp1 = state[basis_index_0 + 1];
			state[basis_index_0] = state[basis_index_1];
			state[basis_index_0 + 1] = state[basis_index_1 + 1];
			state[basis_index_1] = temp0;
			state[basis_index_1 + 1] = temp1;
		}
	}
}
#endif

#ifdef _USE_SIMD
/* AVX variant: same index enumeration, but the neighbor-swap cases move
 * amplitudes with 256-bit loads/stores (one CTYPE = two doubles, so one
 * __m256d holds two amplitudes). */
void CNOT_gate_single_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 4;
	const ITYPE target_mask = 1ULL << target_qubit_index;
	const ITYPE control_mask = 1ULL << control_qubit_index;
	const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
	const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
	const ITYPE low_mask = min_qubit_mask - 1;
	const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
	const ITYPE high_mask = ~(max_qubit_mask - 1);
	ITYPE state_index = 0;
	if (target_qubit_index == 0) {
		// swap neighboring two basis
		for (state_index = 0; state_index < loop_dim; ++state_index) {
			ITYPE basis_index = ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			double* ptr = (double*)(state + basis_index);
			__m256d data = _mm256_loadu_pd(ptr);
			data = _mm256_permute4x64_pd(data, 78); // (3210) -> (1032) : 1*2 + 4*3 + 16*0 + 64*1 = 2+12+64=78
			_mm256_storeu_pd(ptr, data);
		}
	}
	else if (control_qubit_index == 0) {
		// no neighboring swap
		for (state_index = 0; state_index < loop_dim; ++state_index) {
			ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			ITYPE basis_index_1 = basis_index_0 + target_mask;
			CTYPE temp = state[basis_index_0];
			state[basis_index_0] = state[basis_index_1];
			state[basis_index_1] = temp;
		}
	}
	else {
		// a,a+1 is swapped to a^m, a^m+1, respectively
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			ITYPE basis_index_1 = basis_index_0 + target_mask;
			double* ptr0 = (double*)(state + basis_index_0);
			double* ptr1 = (double*)(state + basis_index_1);
			__m256d data0 = _mm256_loadu_pd(ptr0);
			__m256d data1 = _mm256_loadu_pd(ptr1);
			_mm256_storeu_pd(ptr1, data0);
			_mm256_storeu_pd(ptr0, data1);
		}
	}
}

#ifdef _OPENMP
/* OpenMP + AVX variant: CNOT_gate_single_simd with each branch's loop
 * parallelised; iterations touch disjoint amplitude pairs, so no
 * synchronisation is required. */
void CNOT_gate_parallel_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 4;
	const ITYPE target_mask = 1ULL << target_qubit_index;
	const ITYPE control_mask = 1ULL << control_qubit_index;
	const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
	const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
	const ITYPE low_mask = min_qubit_mask - 1;
	const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
	const ITYPE high_mask = ~(max_qubit_mask - 1);
	ITYPE state_index = 0;
	if (target_qubit_index == 0) {
		// swap neighboring two basis
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; ++state_index) {
			ITYPE basis_index = ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			double* ptr = (double*)(state + basis_index);
			__m256d data = _mm256_loadu_pd(ptr);
			data = _mm256_permute4x64_pd(data, 78); // (3210) -> (1032) : 1*2 + 4*3 + 16*0 + 64*1 = 2+12+64=78
			_mm256_storeu_pd(ptr, data);
		}
	}
	else if (control_qubit_index == 0) {
		// no neighboring swap
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; ++state_index) {
			ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			ITYPE basis_index_1 = basis_index_0 + target_mask;
			CTYPE temp = state[basis_index_0];
			state[basis_index_0] = state[basis_index_1];
			state[basis_index_1] = temp;
		}
	}
	else {
		// a,a+1 is swapped to a^m, a^m+1, respectively
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
			ITYPE basis_index_1 = basis_index_0 + target_mask;
			double* ptr0 = (double*)(state + basis_index_0);
			double* ptr1 = (double*)(state + basis_index_1);
			__m256d data0 = _mm256_loadu_pd(ptr0);
			__m256d data1 = _mm256_loadu_pd(ptr1);
			_mm256_storeu_pd(ptr1, data0);
			_mm256_storeu_pd(ptr0, data1);
		}
	}
}
#endif
#endif

/* Legacy reference implementations, kept commented out for comparison. */
/*
#ifdef _OPENMP
void CNOT_gate_parallel(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 4;
	const ITYPE target_mask = 1ULL << target_qubit_index;
	const ITYPE control_mask = 1ULL << control_qubit_index;
	const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
	const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
	const ITYPE low_mask = min_qubit_mask - 1;
	const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
	const ITYPE high_mask = ~(max_qubit_mask - 1);
	ITYPE state_index = 0;
#pragma omp parallel for
	for (state_index = 0; state_index < loop_dim; ++state_index) {
		ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
		ITYPE basis_index_1 = basis_index_0 + target_mask;
		CTYPE temp = state[basis_index_0];
		state[basis_index_0] = state[basis_index_1];
		state[basis_index_1] = temp;
	}
}
#endif

void CNOT_gate_old_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 4;
	const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
	const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << max_qubit_index;
	const ITYPE control_mask = 1ULL << control_qubit_index;
	const ITYPE target_mask = 1ULL << target_qubit_index;
	ITYPE state_index;
	for (state_index = 0; state_index < loop_dim; ++state_index) {
		ITYPE basis_insert_only_min = insert_zero_to_basis_index(state_index, min_qubit_mask, min_qubit_index);
		ITYPE basis_c1t0 = insert_zero_to_basis_index(basis_insert_only_min, max_qubit_mask, max_qubit_index) ^ control_mask;
		ITYPE basis_c1t1 = basis_c1t0 ^ target_mask;
		swap_amplitude(state, basis_c1t0, basis_c1t1);
	}
}

#ifdef _OPENMP
void CNOT_gate_old_parallel(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 4;
	const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
	const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << max_qubit_index;
	const ITYPE control_mask = 1ULL << control_qubit_index;
	const ITYPE target_mask = 1ULL << target_qubit_index;
	ITYPE state_index;
#pragma omp parallel for
	for (state_index = 0; state_index < loop_dim; ++state_index) {
		ITYPE basis_insert_only_min = insert_zero_to_basis_index(state_index, min_qubit_mask, min_qubit_index);
		ITYPE basis_c1t0 = insert_zero_to_basis_index(basis_insert_only_min, max_qubit_mask, max_qubit_index) ^ control_mask;
		ITYPE basis_c1t1 = basis_c1t0 ^ target_mask;
		swap_amplitude(state, basis_c1t0, basis_c1t1);
	}
}
#endif

void CNOT_gate_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 4;
	const ITYPE target_mask = 1ULL << target_qubit_index;
	const ITYPE control_mask = 1ULL << control_qubit_index;
	const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
	const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
	const ITYPE low_mask = min_qubit_mask - 1;
	const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
	const ITYPE high_mask = ~(max_qubit_mask - 1);
	ITYPE state_index = 0;
	for (state_index = 0; state_index < loop_dim; ++state_index) {
		ITYPE basis_index_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2) + control_mask;
		ITYPE basis_index_1 = basis_index_0 + target_mask;
		CTYPE temp = state[basis_index_0];
		state[basis_index_0] = state[basis_index_1];
		state[basis_index_1] = temp;
	}
}
*/
tls_test_c.c
/* tls_test_c.c -- test TLS common symbol Copyright (C) 2008-2020 Free Software Foundation, Inc. Written by Ian Lance Taylor <iant@google.com> This file is part of gold. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* The only way I know to get gcc to generate a TLS common symbol is to use a C file and an OpenMP directive. */ #include "config.h" #include <stdio.h> #define CHECK_EQ_OR_RETURN(var, expected) \ do \ { \ if ((var) != (expected)) \ { \ printf(#var ": expected %d, found %d\n", expected, var); \ return 0; \ } \ } \ while (0) #ifdef HAVE_OMP_SUPPORT int v7; #pragma omp threadprivate (v7) #endif int t11(void); int t11_last(void); int t11(void) { #ifdef HAVE_OMP_SUPPORT CHECK_EQ_OR_RETURN(v7, 0); v7 = 70; #endif return 1; } int t11_last(void) { #ifdef HAVE_OMP_SUPPORT CHECK_EQ_OR_RETURN(v7, 70); #endif return 1; }
SparseDenseProduct.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

// An outer product involving a sparse operand always yields sparse storage.
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };

// Dispatcher for res += alpha * (sparse lhs) * (dense rhs); specialized on
// the sparse operand's storage order and on whether the dense rhs can be
// processed one column at a time (ColPerCol).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType,
  int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
  bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;

// Row-major sparse lhs, column-per-column traversal: each result row is an
// independent dot product, so rows can be processed in parallel.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  typedef evaluator<Lhs> LhsEval;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    LhsEval lhsEval(lhs);

    Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
#endif

    for(Index c=0; c<rhs.cols(); ++c)
    {
#ifdef EIGEN_HAS_OPENMP
      // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
      // It basically represents the minimal amount of work to be done to be worth it.
      if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
      {
        #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
      else
#endif
      {
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
    }
  }

  // res(i,col) += alpha * dot(lhs.row(i), rhs.col(col)) over lhs's stored
  // (non-zero) entries only.
  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
  {
    typename Res::Scalar tmp(0);
    for(LhsInnerIterator it(lhsEval,i); it ;++it)
      tmp += it.value() * rhs.coeff(it.index(),col);
    res.coeffRef(i,col) += alpha * tmp;
  }

};

// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
// template<typename T1, typename T2/*, int Options_, typename _StrideType*/>
// struct ScalarBinaryOpTraits<T1, Ref<T2/*, Options_, _StrideType*/> >
// {
//   enum {
//     Defined = 1
//   };
//   typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
// };

// Column-major sparse lhs, column-per-column traversal: scatter each scaled
// rhs coefficient down the corresponding sparse column of lhs.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef evaluator<Lhs> LhsEval;
  typedef typename LhsEval::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
  {
    LhsEval lhsEval(lhs);
    for(Index c=0; c<rhs.cols(); ++c)
    {
      for(Index j=0; j<lhs.outerSize(); ++j)
      {
//        typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
        typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
        for(LhsInnerIterator it(lhsEval,j); it ;++it)
          res.coeffRef(it.index(),c) += it.value() * rhs_j;
      }
    }
  }
};

// Row-major sparse lhs, whole-rhs traversal (rhs is row-major with several
// columns). NOTE(review): this definition continues past the end of this
// chunk; the visible part is preserved unchanged.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef evaluator<Lhs> LhsEval;
  typedef typename LhsEval::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    Index n = lhs.rows();
    LhsEval lhsEval(lhs);

#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
    // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
    // It basically represents the minimal amount of work to be done to be worth it.
if(threads>1 && lhsEval.nonZerosEstimate()*rhs.cols() > 20000) { #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads) for(Index i=0; i<n; ++i) processRow(lhsEval,rhs,res,alpha,i); } else #endif { for(Index i=0; i<n; ++i) processRow(lhsEval, rhs, res, alpha, i); } } static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, Res& res, const typename Res::Scalar& alpha, Index i) { typename Res::RowXpr res_i(res.row(i)); for(LhsInnerIterator it(lhsEval,i); it ;++it) res_i += (alpha*it.value()) * rhs.row(it.index()); } }; template<typename SparseLhsType, typename DenseRhsType, typename DenseResType> struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false> { typedef typename internal::remove_all<SparseLhsType>::type Lhs; typedef typename internal::remove_all<DenseRhsType>::type Rhs; typedef typename internal::remove_all<DenseResType>::type Res; typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { evaluator<Lhs> lhsEval(lhs); for(Index j=0; j<lhs.outerSize(); ++j) { typename Rhs::ConstRowXpr rhs_j(rhs.row(j)); for(LhsInnerIterator it(lhsEval,j); it ;++it) res.row(it.index()) += (alpha*it.value()) * rhs_j; } } }; template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType> inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) { sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha); } } // end namespace internal namespace internal { template<typename Lhs, typename Rhs, int ProductType> struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType> : 
generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> > { typedef typename Product<Lhs,Rhs>::Scalar Scalar; template<typename Dest> static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested; typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested; LhsNested lhsNested(lhs); RhsNested rhsNested(rhs); internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha); } }; template<typename Lhs, typename Rhs, int ProductType> struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType> : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType> {}; template<typename Lhs, typename Rhs, int ProductType> struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType> : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> > { typedef typename Product<Lhs,Rhs>::Scalar Scalar; template<typename Dst> static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested; typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 
1 : Lhs::RowsAtCompileTime>::type RhsNested; LhsNested lhsNested(lhs); RhsNested rhsNested(rhs); // transpose everything Transpose<Dst> dstT(dst); internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha); } }; template<typename Lhs, typename Rhs, int ProductType> struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType> : generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType> {}; template<typename LhsT, typename RhsT, bool NeedToTranspose> struct sparse_dense_outer_product_evaluator { protected: typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1; typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs; typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType; // if the actual left-hand side is a dense vector, // then build a sparse-view so that we can seamlessly iterate over it. typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value, Lhs1, SparseView<Lhs1> >::type ActualLhs; typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value, Lhs1 const&, SparseView<Lhs1> >::type LhsArg; typedef evaluator<ActualLhs> LhsEval; typedef evaluator<ActualRhs> RhsEval; typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator; typedef typename ProdXprType::Scalar Scalar; public: enum { Flags = NeedToTranspose ? RowMajorBit : 0, CoeffReadCost = HugeCost }; class InnerIterator : public LhsIterator { public: InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer) : LhsIterator(xprEval.m_lhsXprImpl, 0), m_outer(outer), m_empty(false), m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() )) {} EIGEN_STRONG_INLINE Index outer() const { return m_outer; } EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); } EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? 
LhsIterator::index() : m_outer; } EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; } EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); } protected: Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const { return rhs.coeff(outer); } Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse()) { typename RhsEval::InnerIterator it(rhs, outer); if (it && it.index()==0 && it.value()!=Scalar(0)) return it.value(); m_empty = true; return Scalar(0); } Index m_outer; bool m_empty; Scalar m_factor; }; sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs) : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } // transpose case sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs) : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } protected: const LhsArg m_lhs; evaluator<ActualLhs> m_lhsXprImpl; evaluator<ActualRhs> m_rhsXprImpl; }; // sparse * dense outer product template<typename Lhs, typename Rhs> struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape> : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> { typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base; typedef Product<Lhs, Rhs> XprType; typedef typename XprType::PlainObject PlainObject; explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs()) {} }; template<typename Lhs, typename Rhs> struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape> : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> { typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base; typedef Product<Lhs, Rhs> XprType; typedef typename XprType::PlainObject PlainObject; explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs()) {} 
}; } // end namespace internal } // end namespace Eigen #endif // EIGEN_SPARSEDENSEPRODUCT_H
lu_par_loop.c
#include "trace.h"
#include "common.h"
#include <omp.h>

/* LU factorization of a square matrix stored by block-columns.
   Each panel is factorized sequentially (it carries the pivoting),
   while the trailing updates of every step -- which are mutually
   independent -- are spread over the OpenMP threads. */
void lu_par_loop(Matrix A, info_type info){

  int col, trail;

  /* Set up the tracing machinery before any traced event occurs */
  trace_init();

  for(col = 0; col < info.NB; col++){

    /* Factorize block-column 'col' */
    panel(A[col], col, info);

    /* Update every trailing block-column with respect to the panel
       just computed; iterations are independent, so the loop is
       distributed over the threads */
#pragma omp parallel for
    for(trail = col + 1; trail < info.NB; trail++){
      update(A[col], A[trail], col, trail, info);
    }
  }

  /* Apply the row permutations produced by numerical pivoting.
     This step is inherently sequential. */
  backperm(A, info);

  /* Dump the collected trace to an SVG file (ignore) */
  trace_dump("trace_par_loop.svg");

  return;
}
displacement_lagrangemultiplier_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" #include "utilities/constraint_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * @details This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. 
* @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; /// The epsilon tolerance definition static constexpr double Tolerance = std::numeric_limits<double>::epsilon(); ///@} ///@name Life Cycle ///@{ /// Constructor. 
/** * @param DispRatioTolerance Relative tolerance for displacement error * @param DispAbsTolerance Absolute tolerance for displacement error * @param LMRatioTolerance Relative tolerance for lagrange multiplier error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false); // The displacement solution mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The contact solution mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // The default parameters Parameters default_parameters = Parameters(R"( { "ensure_contact" : false, "print_convergence_criterion" : false, "displacement_relative_tolerance" : 1.0e-4, "displacement_absolute_tolerance" : 1.0e-9, "contact_displacement_relative_tolerance" : 1.0e-4, "contact_displacement_absolute_tolerance" : 1.0e-9 })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); // The displacement solution mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble(); 
mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble(); // The contact solution mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false); } // Copy constructor. DisplacementLagrangeMultiplierContactCriteria( DisplacementLagrangeMultiplierContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) { } /// Destructor. ~DisplacementLagrangeMultiplierContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something // Initialize TDataType disp_solution_norm = 0.0, lm_solution_norm = 0.0, disp_increase_norm = 0.0, lm_increase_norm = 0.0; IndexType disp_dof_num(0),lm_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; TDataType dof_value = 0.0, dof_incr = 0.0; // The number of active dofs const std::size_t number_active_dofs = rb.size(); // Loop over Dofs #pragma omp parallel for firstprivate(dof_id, dof_value ,dof_incr) reduction(+:disp_solution_norm, lm_solution_norm, disp_increase_norm, lm_increase_norm, disp_dof_num, lm_dof_num) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; dof_id = it_dof->EquationId(); // Check dof id is solved if (dof_id < number_active_dofs) { if (mActiveDofs[dof_id] == 1) { dof_value = it_dof->GetSolutionStepValue(0); dof_incr = rDx[dof_id]; const auto& r_curr_var = it_dof->GetVariable(); if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) { lm_solution_norm += dof_value * dof_value; lm_increase_norm += dof_incr * dof_incr; lm_dof_num++; } else { disp_solution_norm += dof_value * dof_value; disp_increase_norm += dof_incr * dof_incr; disp_dof_num++; } } } } if(disp_increase_norm < Tolerance) disp_increase_norm = 1.0; if(lm_increase_norm < Tolerance) 
lm_increase_norm = 1.0; if(disp_solution_norm < Tolerance) disp_solution_norm = 1.0; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm); const TDataType lm_ratio = lm_solution_norm > Tolerance ? std::sqrt(lm_increase_norm/lm_solution_norm) : 0.0; const TDataType disp_abs = std::sqrt(disp_increase_norm)/static_cast<TDataType>(disp_dof_num); const TDataType lm_abs = std::sqrt(lm_increase_norm)/static_cast<TDataType>(lm_dof_num); // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance; } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT(" LAGRANGE MUL:\tRATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs 
<< BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << " LAGRANGE MUL:\tRATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl; } } } // We check if converged const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance); const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance); if (disp_converged && lm_converged) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FGRN(" Achieved")); else r_table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = 
r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. (unused) */ void Initialize( ModelPart& rModelPart ) override { BaseType::mConvergenceCriteriaIsInitialized = true; ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Filling mActiveDofs when MPC exist ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet); } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM std::vector<int> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, 
TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H */
GB_unaryop__minv_uint8_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB_unop__minv_uint8_int8
// op(A') function: GB_tran__minv_uint8_int8

// C type:   uint8_t
// A type:   int8_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 8)

// type of the A matrix entries
#define GB_ATYPE \
    int8_t

// type of the C matrix entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: MINV (integer multiplicative inverse, 8-bit unsigned,
// as defined by the GB_IMINV_UNSIGNED macro)
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 8) ;

// casting (int8_t input entry to uint8_t before applying the operator)
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise to the anz entries of Ax, writing the
// result into Cx.  Iterations are independent, so the loop is parallelized
// with a static OpenMP schedule over nthreads threads.
GrB_Info GB_unop__minv_uint8_int8
(
    uint8_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is done by the shared template GB_unaryop_transpose.c,
// which expands using the GB_* macros defined above (phase 2 of 2).
GrB_Info GB_tran__minv_uint8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
expected_output.c
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

#include <polybench.h>

#include "syrk.h"

/**
 * This version is stamped on May 10, 2016
 *
 * Contact:
 * Louis-Noel Pouchet <pouchet.ohio-state.edu>
 * Tomofumi Yuki <tomofumi.yuki.fr>
 *
 * Web address: http://polybench.sourceforge.net
 */
/*syrk.c: this file is part of PolyBench/C*/

/*Include polybench common header.*/

/*Include benchmark-specific header.*/

/*Array initialization.
  Fills alpha/beta with fixed scalars and A (n x m) and C (n x n) with
  deterministic pseudo-data in [0,1) derived from the array indices,
  so runs are reproducible and checkable. */
static void init_array(int n, int m, double *alpha, double *beta, double C[1200][1200], double A[1200][1000])
{
    int i, j;

    *alpha = 1.5;
    *beta = 1.2;

    for(i = 0; i < n; i++)
        for(j = 0; j < m; j++)
            A[i][j] = (double) ((i * j + 1) % n) / n;

    for(i = 0; i < n; i++)
        for(j = 0; j < n; j++)
            C[i][j] = (double) ((i * j + 2) % m) / m;
}

/*DCE code. Must scan the entire live-out data.
  Can be used also to check the correctness of the output.
  Dumps C to stderr, 20 values per line, between BEGIN/END markers. */
static void print_array(int n, double C[1200][1200])
{
    int i, j;

    fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
    fprintf(stderr, "begin dump: %s", "C");
    for(i = 0; i < n; i++)
        for(j = 0; j < n; j++) {
            /* start a fresh line every 20 values */
            if((i * n + j) % 20 == 0)
                fprintf(stderr, "\n");
            fprintf(stderr, "%0.2lf ", C[i][j]);
        }
    fprintf(stderr, "\nend dump: %s\n", "C");
    fprintf(stderr, "==END DUMP_ARRAYS==\n");
}

/*Main computational kernel.
The whole function will be timed, including the call and return.*/
/* SYRK: C := alpha*A*A^T + beta*C, updating only the lower triangle
   (col <= row).  Rows of C are independent, so the outer loop is
   distributed over the OpenMP threads. */
static void kernel_syrk(int n, int m, double alpha, double beta,
                        double C[1200][1200], double A[1200][1000])
{
    int row, col, kk;

#pragma omp parallel for default(shared) private(row, col, kk) firstprivate(n, beta, m, alpha, A)
    for(row = 0; row < n; row++) {
        /* Scale the existing lower-triangular row of C by beta. */
        for(col = 0; col <= row; col++)
            C[row][col] *= beta;
        /* Accumulate alpha * A[row][:] . A[col][:] over the m columns of A. */
        for(kk = 0; kk < m; kk++) {
            for(col = 0; col <= row; col++)
                C[row][col] += alpha * A[row][kk] * A[col][kk];
        }
    }
}

int main(int argc, char **argv)
{
    /* Problem sizes (fixed by the PolyBench dataset). */
    int n = 1200;
    int m = 1000;

    /* Scalars and heap-allocated matrices. */
    double alpha;
    double beta;
    double (*C)[1200][1200] =
        (double (*)[1200][1200]) polybench_alloc_data((1200 + 0) * (1200 + 0), sizeof(double));
    double (*A)[1200][1000] =
        (double (*)[1200][1000]) polybench_alloc_data((1200 + 0) * (1000 + 0), sizeof(double));

    /* Initialize array(s). */
    init_array(n, m, &alpha, &beta, *C, *A);

    /* Run kernel (the timed region). */
    kernel_syrk(n, m, alpha, beta, *C, *A);

    /* Prevent dead-code elimination: the condition is never true in
       practice, but the compiler cannot prove it, so all live-out
       data must be kept. */
    if(argc > 42 && !strcmp(argv[0], ""))
        print_array(n, *C);

    /* Be clean. */
    free((void *) C);
    free((void *) A);

    return 0;
}
SpatialConvolutionMM.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionMM.c"
#else

/* note: due to write issues, this one cannot be parallelized as well as unfolded_copy */
/* Col2im-style accumulation: scatter-adds the unfolded buffer `finput`
   back into `input`. Inverse of unfolded_acc's counterpart below; used by
   the backward pass. Parallelized over input planes only, because
   different (kh, kw) positions write overlapping rows of the same plane. */
static void nn_(unfolded_acc)(THTensor *finput, THTensor *input,
                              int kW, int kH, int dW, int dH, int padW, int padH,
                              int nInputPlane, int inputWidth, int inputHeight,
                              int outputWidth, int outputHeight)
{
  int nip;
  real *input_data = THTensor_(data)(input);
  real *finput_data = THTensor_(data)(finput);

#pragma omp parallel for private(nip)
  for(nip = 0; nip < nInputPlane; nip++)
  {
    int kw, kh, y, x, ix, iy;
    for(kh = 0; kh < kH; kh++)
    {
      for(kw = 0; kw < kW; kw++)
      {
        /* One (plane, kh, kw) slab of the unfolded buffer... */
        real *src = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth);
        /* ...accumulates into this input plane. */
        real *dst = input_data + nip*(inputHeight*inputWidth);
        if (padW > 0 || padH > 0)
        {
          int lpad,rpad;
          for(y = 0; y < outputHeight; y++)
          {
            iy = y*dH - padH + kh;
            if (iy < 0 || iy >= inputHeight)
            {
              /* Row falls entirely in the padding: nothing to add back. */
            }
            else
            {
              if (dW==1){
                ix = 0 - padW + kw;
                /* lpad/rpad clip the row to the valid image columns.
                   NOTE(review): fmaxf on int operands relies on exact
                   float conversion — fine for small kernel/pad sizes. */
                lpad = fmaxf(0,padW-kw);
                rpad = fmaxf(0,padW-(kW-kw-1));
                THVector_(add)(dst+iy*inputWidth+ix+lpad, src+y*outputWidth+lpad, 1, outputWidth - lpad - rpad); /* note: THVector_add could handle 1 value better */
              }
              else{
                /* Strided case: add element by element, skipping padding. */
                for (x=0; x<outputWidth; x++){
                  ix = x*dW - padW + kw;
                  if (ix < 0 || ix >= inputWidth){
                  }else
                    THVector_(add)(dst+iy*inputWidth+ix, src+y*outputWidth+x, 1, 1);
                }
              }
            }
          }
        }
        else
        {
          /* No padding: every unfolded element maps inside the image. */
          for(y = 0; y < outputHeight; y++)
          {
            iy = y*dH + kh;
            ix = 0 + kw;
            if (dW == 1 )
            {
              THVector_(add)(dst+iy*inputWidth+ix, src+y*outputWidth, 1, outputWidth); /* note: THVector_add could handle 1 value better */
            }
            else{
              for(x = 0; x < outputWidth; x++)
                THVector_(add)(dst+iy*inputWidth+ix+x*dW, src+y*outputWidth+x, 1, 1);
            }
          }
        }
      }
    }
  }
}

/* Im2col: copies `input` into the unfolded buffer `finput` so that the
   convolution becomes a single matrix multiply. Each flattened
   (plane, kh, kw) index writes a disjoint slab, so the loop parallelizes
   cleanly over k. */
static void nn_(unfolded_copy)(THTensor *finput, THTensor *input,
                               int kW, int kH, int dW, int dH, int padW, int padH,
                               int nInputPlane, int inputWidth, int inputHeight,
                               int outputWidth, int outputHeight)
{
  long k;
  real *input_data = THTensor_(data)(input);
  real *finput_data = THTensor_(data)(finput);

#pragma omp parallel for private(k)
  for(k = 0; k < nInputPlane*kH*kW; k++)
  {
    /* Decode flat index into (plane, kernel-row, kernel-col). */
    int nip = k / (kH*kW);
    int rest = k % (kH*kW);
    int kh = rest / kW;
    int kw = rest % kW;
    int x,y,ix,iy;
    real *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth);
    real *src = input_data + nip*(inputHeight*inputWidth);
    if (padW > 0 || padH > 0)
    {
      int lpad,rpad;
      for(y = 0; y < outputHeight; y++)
      {
        iy = y*dH - padH + kh;
        if (iy < 0 || iy >= inputHeight)
        {
          /* Whole row sits in vertical padding: emit zeros. */
          memset(dst+y*outputWidth, 0, sizeof(real)*outputWidth);
        }
        else
        {
          if (dW==1){
            ix = 0 - padW + kw;
            /* lpad/rpad = number of padded columns on each side.
               NOTE(review): fmaxf on ints — exact for realistic sizes. */
            lpad = fmaxf(0,padW-kw);
            rpad = fmaxf(0,padW-(kW-kw-1));
            if (outputWidth-rpad-lpad <= 0) {
              memset(dst+y*outputWidth, 0, sizeof(real)*outputWidth);
            }
            else {
              /* zero left pad | bulk memcpy of valid span | zero right pad */
              if (lpad > 0) memset(dst+y*outputWidth, 0, sizeof(real)*lpad);
              memcpy(dst+y*outputWidth+lpad, src+iy*inputWidth+ix+lpad, sizeof(real)*(outputWidth-rpad-lpad));
              if (rpad > 0) memset(dst+y*outputWidth + outputWidth - rpad, 0, sizeof(real)*rpad);
            }
          }
          else{
            /* Strided case: element-wise copy, zero-filling padding hits. */
            for (x=0; x<outputWidth; x++){
              ix = x*dW - padW + kw;
              if (ix < 0 || ix >= inputWidth)
                memset(dst+y*outputWidth+x, 0, sizeof(real)*1);
              else
                memcpy(dst+y*outputWidth+x, src+iy*inputWidth+ix, sizeof(real)*(1));
            }
          }
        }
      }
    }
    else
    {
      for(y = 0; y < outputHeight; y++)
      {
        iy = y*dH + kh;
        ix = 0 + kw;
        if (dW == 1)
          memcpy(dst+y*outputWidth, src+iy*inputWidth+ix, sizeof(real)*outputWidth);
        else{
          for (x=0; x<outputWidth; x++)
            memcpy(dst+y*outputWidth+x, src+iy*inputWidth+ix+x*dW, sizeof(real)*(1));
        }
      }
    }
  }
}

/* Forward pass for one sample: unfold the input, broadcast the bias into
   every output location, then compute output = bias + weight * finput via
   a single addmm on a 2D view of `output`. */
static void nn_(SpatialConvolutionMM_updateOutput_frame)(THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput,
                                                         int kW, int kH, int dW, int dH, int padW, int padH,
                                                         long nInputPlane, long inputWidth, long inputHeight,
                                                         long nOutputPlane, long outputWidth, long outputHeight)
{
  long i;
  THTensor *output2d;

  nn_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH, nInputPlane, inputWidth, inputHeight, outputWidth, outputHeight);

  /* 2D alias of `output`: nOutputPlane x (outputHeight*outputWidth). */
  output2d = THTensor_(newWithStorage2d)(output->storage, output->storageOffset,
                                         nOutputPlane, -1,
                                         outputHeight*outputWidth, -1);

  /* Seed each output plane with its bias value (addmm then accumulates). */
  for(i = 0; i < nOutputPlane; i++)
    THVector_(fill)(output->storage->data+output->storageOffset+output->stride[0]*i, THTensor_(get1d)(bias, i), outputHeight*outputWidth);

  THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);

  THTensor_(free)(output2d);
}

/* Lua binding: forward pass. Accepts a 3D (C,H,W) or 4D (N,C,H,W) input,
   resizes `finput`/`output`, and dispatches per-frame work (parallel over
   the batch in 4D mode). Returns 1 (the output tensor on the Lua stack). */
static int nn_(SpatialConvolutionMM_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int padW = luaT_getfieldcheckint(L, 1, "padW");
  int padH = luaT_getfieldcheckint(L, 1, "padH");

  THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);

  /* Dimension indices for the 3D layout; shifted by one in batch mode. */
  int dimf = 0;
  int dimw = 2;
  int dimh = 1;

  long nInputPlane;
  long inputWidth;
  long inputHeight;
  long nOutputPlane;
  long outputWidth;
  long outputHeight;

  luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D(batch mode) tensor expected");

  if (input->nDimension == 4) {
    dimf++;
    dimw++;
    dimh++;
  }

  nInputPlane = input->size[dimf];
  inputWidth = input->size[dimw];
  inputHeight = input->size[dimh];
  nOutputPlane = weight->size[0];
  /* Standard convolution output-size formula. */
  outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
  outputHeight = (inputHeight + 2*padH - kH) / dH + 1;

  if (outputWidth < 1 || outputHeight < 1)
    THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small",
            nInputPlane,inputHeight,inputWidth,nInputPlane,outputHeight,outputWidth);

  if(input->nDimension == 3)
  {
    THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);

    nn_(SpatialConvolutionMM_updateOutput_frame)(input, output, weight, bias, finput,
                                                 kW, kH, dW, dH, padW, padH,
                                                 nInputPlane, inputWidth, inputHeight,
                                                 nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    long T = input->size[0];
    long t;

    THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);

    /* Frames are independent; each thread works on its own tensor views. */
#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      nn_(SpatialConvolutionMM_updateOutput_frame)(input_t, output_t, weight, bias, finput_t,
                                                   kW, kH, dW, dH, padW, padH,
                                                   nInputPlane, inputWidth, inputHeight,
                                                   nOutputPlane, outputWidth, outputHeight);

      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
      THTensor_(free)(finput_t);
    }
  }

  return 1;
}

/* Backward-input for one sample: fgradInput = weight^T * gradOutput
   (weight arrives pre-transposed by the caller), then fold the unfolded
   gradient back into gradInput. */
static void nn_(SpatialConvolutionMM_updateGradInput_frame)(THTensor *gradInput, THTensor *gradOutput, THTensor *weight, THTensor *fgradInput,
                                                            int kW, int kH, int dW, int dH, int padW, int padH)
{
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(gradOutput->storage, gradOutput->storageOffset,
                                                       gradOutput->size[0], -1,
                                                       gradOutput->size[1]*gradOutput->size[2], -1);
  /* beta = 0: overwrite fgradInput rather than accumulate. */
  THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
  THTensor_(free)(gradOutput2d);

  THTensor_(zero)(gradInput);

  nn_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH, gradInput->size[0], gradInput->size[2], gradInput->size[1], gradOutput->size[2], gradOutput->size[1]);
}

/* Lua binding: backward pass w.r.t. the input. Transposes `weight` in
   place for the GEMM and restores it afterwards. Returns 1. */
static int nn_(SpatialConvolutionMM_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int padW = luaT_getfieldcheckint(L, 1, "padW");
  int padH = luaT_getfieldcheckint(L, 1, "padH");
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");

  THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
  THTensor *fgradInput = luaT_getfieldcheckudata(L, 1, "fgradInput", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);

  THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1, "Number of output features is not equal to nOutputPlane" );

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);
  /* In-place transpose; undone after the loop below. */
  THTensor_(transpose)(weight, weight, 0, 1);

  if(input->nDimension == 3)
  {
    nn_(SpatialConvolutionMM_updateGradInput_frame)(gradInput, gradOutput, weight, fgradInput, kW, kH, dW, dH, padW, padH);
  }
  else
  {
    long T = input->size[0];
    long t;

#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      nn_(SpatialConvolutionMM_updateGradInput_frame)(gradInput_t, gradOutput_t, weight, fgradInput_t, kW, kH, dW, dH, padW, padH);

      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(fgradInput_t);
    }
  }

  /* Restore the original weight layout. */
  THTensor_(transpose)(weight, weight, 0, 1);

  return 1;
}

/* Parameter gradients for one sample:
   gradWeight += scale * gradOutput * finput^T, and gradBias accumulates
   the per-plane row sums of gradOutput. */
static void nn_(SpatialConvolutionMM_accGradParameters_frame)(THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput,
                                                              real scale)
{
  long i;
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(gradOutput->storage, gradOutput->storageOffset,
                                                       gradOutput->size[0], -1,
                                                       gradOutput->size[1]*gradOutput->size[2], -1);

  /* Transpose finput for the GEMM, then restore it. */
  THTensor_(transpose)(finput, finput, 0, 1);
  THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, finput);
  THTensor_(transpose)(finput, finput, 0, 1);

  for(i = 0; i < gradBias->size[0]; i++)
  {
    long k;
    real sum = 0;
    real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0];
    for(k = 0; k < gradOutput2d->size[1]; k++)
      sum += data[k];
    (gradBias->storage->data + gradBias->storageOffset)[i] += scale*sum;
  }

  THTensor_(free)(gradOutput2d);
}

/* Lua binding: accumulate weight/bias gradients. The batch loop is
   deliberately sequential: every frame accumulates into the same shared
   gradWeight/gradBias, so an OpenMP loop here would race. Returns 0. */
static int nn_(SpatialConvolutionMM_accGradParameters)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  real scale = luaL_optnumber(L, 4, 1);
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");

  THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
  THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor);
  THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_Tensor);

  THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1, "Number of output features is not equal to nOutputPlane" );

  if(input->nDimension == 3)
  {
    nn_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale);
  }
  else
  {
    long T = input->size[0];
    long t;

    for(t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      nn_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale);

      THTensor_(free)(gradOutput_t);
      THTensor_(free)(finput_t);
    }
  }

  return 0;
}

/* Method table exported to Lua under the "nn" name. */
static const struct luaL_Reg nn_(SpatialConvolutionMM__) [] = {
  {"SpatialConvolutionMM_updateOutput", nn_(SpatialConvolutionMM_updateOutput)},
  {"SpatialConvolutionMM_updateGradInput", nn_(SpatialConvolutionMM_updateGradInput)},
  {"SpatialConvolutionMM_accGradParameters", nn_(SpatialConvolutionMM_accGradParameters)},
  {NULL, NULL}
};

/* Register the methods on the tensor metatable. */
static void nn_(SpatialConvolutionMM_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(SpatialConvolutionMM__), "nn");
  lua_pop(L,1);
}

#endif
mlpcell_bf16.h
#ifndef MLPCELL_BF16 #define MLPCELL_BF16 #include "mc_funcs.h" #define PCL_ASSERT(cond, x...) do { if(!(cond)) { printf(x); fflush(stdout); exit(1); } } while(0) #define DECL_VLA_PTR(type, name, dims, ptr) type (*name)dims = (type (*)dims)ptr #define DECL_VLA_PTR_CHECK_VAR(var, type, name, dims, ptr) type (*name)dims = (var > 0) ? (type (*)dims)ptr : NULL #define DECL_VLA_PTR_CHECK_COND(cond, type, name, dims, ptr) type (*name)dims = cond ? (type (*)dims)ptr : NULL #define DECL_VLA_PTR_CHECK_COND_VAR(cond, var, type, name, dims, ptr) type (*name)dims = (cond && var > 0) ? (type (*)dims)ptr : NULL #define DECL_VLA_PTR_PT(type, name, dims, t) type (*name)dims = (type (*)dims)(t.data_ptr<type>()) #define DECL_VLA_PTR_PT_CHECK_COND(cond, type, name, dims, t) type (*name)dims = cond ? (type (*)dims)(t.data_ptr<type>()) : NULL #define DECL_VLA_PTR_NPT(newtype, type, name, dims, t) newtype (*name)dims = (newtype (*)dims)(t.data_ptr<type>()) #define DECL_VLA_PTR_NPT_CHECK_COND(cond, newtype, type, name, dims, t) newtype (*name)dims = cond ? (newtype (*)dims)(t.data_ptr<type>()) : NULL #define LIBXSMM_ALIGNDOWN(N, A) ((N) & ~((A)-1)) //--------------------------------------norm_to_vnni----------------------------------------------------- // void norm_to_vnni_16b(libxsmm_bfloat16* in, libxsmm_bfloat16* out, int N, int M) { libxsmm_meltw_unary_param trans_param; libxsmm_meltw_unary_type trans_type; trans_param.in.primary = (void*)in; trans_param.out.primary = (void*)out; if ( N % 2 == 1 ) { trans_type = LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI_PAD; } else { trans_type = LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI; } libxsmm_meltwfunction_unary trans_kernel = libxsmm_dispatch_meltw_unary(M, N, (libxsmm_blasint*)&M, (libxsmm_blasint*)&M, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, trans_type); if ( trans_kernel == NULL ) { fprintf( stderr, "JIT for NORM_TO_VNNI TPP. 
Bailing...!\n"); exit(-1); } trans_kernel( &trans_param ); } //--------------------------------------norm_to_normT----------------------------------------------------- // void norm_to_normT_16b(libxsmm_bfloat16* in, libxsmm_bfloat16* out, int N, int M) { libxsmm_meltw_unary_param trans_param; libxsmm_meltw_unary_type trans_type; trans_param.in.primary = (void*)in; trans_param.out.primary = (void*)out; trans_type = LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT; libxsmm_meltwfunction_unary trans_kernel = libxsmm_dispatch_meltw_unary(M, N, (libxsmm_blasint*)&M, (libxsmm_blasint*)&N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, trans_type); if ( trans_kernel == NULL ) { fprintf( stderr, "JIT for NORM_TO_NORMT TPP. Bailing...!\n"); exit(-1); } trans_kernel( &trans_param ); } //--------------------------------------------------convert f32 to bf16 TPP------------------------------------- inline void cvt_f32_bf16(int N, int M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_meltwfunction_unary cvt_f32_bf16_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, unary_flags, unary_type ); PCL_ASSERT(cvt_f32_bf16_kernel, "Null cvt_f32_bf16 kernel"); cvt_f32_bf16_kernel(params); } inline void bf16_copy(int N, int M, int LDO, int LDI, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_datatype compute_dtype = LIBXSMM_DATATYPE_BF16; libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, N, (libxsmm_blasint*)&LDI, (libxsmm_blasint*)&LDO, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, compute_dtype, unary_flags, unary_type); if ( kernel == NULL ) { fprintf( stderr, "JIT 
for bf16 to b16 copy failed. Bailing...!\n"); exit(-1); } kernel(params); } inline void colbcast_bf16_copy(int N, int M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_datatype compute_dtype = LIBXSMM_DATATYPE_BF16; libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, compute_dtype, unary_flags, unary_type); if ( kernel == NULL ) { fprintf( stderr, "JIT for bf16 to b16 broadcast copy failed. Bailing...!\n"); exit(-1); } kernel(params); } inline void bf16_f32_copy(int N, int M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_datatype compute_dtype = LIBXSMM_DATATYPE_F32; libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, compute_dtype, unary_flags, unary_type); if ( kernel == NULL ) { fprintf( stderr, "JIT for bf16 to f32 copy failed. Bailing...!\n"); exit(-1); } kernel(params); } inline void add_bf16_bf16(int N, int M, libxsmm_meltw_binary_param *binary_param) { libxsmm_meltw_binary_flags binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; libxsmm_meltw_binary_type binary_type = LIBXSMM_MELTW_TYPE_BINARY_ADD; libxsmm_meltwfunction_binary add_kernel = libxsmm_dispatch_meltw_binary(M, N, (libxsmm_blasint*)&M, (libxsmm_blasint*)&M, (libxsmm_blasint*)&N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, binary_flags, binary_type); if ( add_kernel == NULL ) { fprintf( stderr, "JIT for BINARY TPP. 
Bailing...!\n"); exit(-1); } add_kernel(binary_param); } inline void relu_fwd_bf16(long N, long M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT; libxsmm_meltwfunction_unary relu_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, unary_flags, LIBXSMM_MELTW_TYPE_UNARY_RELU); if ( relu_kernel == NULL ) { fprintf( stderr, "JIT for ReLU TPP. Bailing...!\n"); exit(-1); } relu_kernel( params ); } inline void relu_bwd_bf16(long N, long M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT; libxsmm_meltwfunction_unary relu_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, unary_flags, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if ( relu_kernel == NULL ) { fprintf( stderr, "JIT for ReLU TPP. Bailing...!\n"); exit(-1); } relu_kernel( params ); } inline void dropout_bf16(long N, long M, libxsmm_meltw_unary_param *params, libxsmm_meltw_unary_flags flags) { libxsmm_meltwfunction_unary dropout_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, flags, LIBXSMM_MELTW_TYPE_UNARY_DROPOUT); if ( dropout_kernel == NULL ) { fprintf( stderr, "JIT for DROPOUT TPP. Bailing...!\n"); exit(-1); } dropout_kernel( params ); } inline void dropout_bwd_bf16(long N, long M, libxsmm_meltw_unary_param *params, libxsmm_meltw_unary_flags flags) { libxsmm_meltwfunction_unary dropout_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, flags, LIBXSMM_MELTW_TYPE_UNARY_DROPOUT_INV); if ( dropout_kernel == NULL ) { fprintf( stderr, "JIT for DROPOUT TPP. 
Bailing...!\n"); exit(-1); } dropout_kernel( params ); } inline void brgemm_bf16_f32(long n, long m, long k, long stride_b, long stride_a, libxsmm_bfloat16 *B_, libxsmm_bfloat16 *A_, float *C, long count, const float beta = 1.0, const char b_trans='n', const char a_trans='n', const char b_vnni='n', const char a_vnni='n') { const float alpha = 1.0; libxsmm_bfloat16 *A = A_; libxsmm_bfloat16 *B = B_; unsigned long long l_br = count; int flags = LIBXSMM_GEMM_VNNI_FLAGS('n', 'n', 'v', 'n'); // Query or JIT-generate reduction kernel; returns NULL if JIT is not supported (bf16 inputs, fp32-accumulate internally, bf16 outputs). * libxsmm_bsmmfunction_reducebatch_strd kernel = libxsmm_bsmmdispatch_reducebatch_strd(m, n, k, stride_a*sizeof(libxsmm_bfloat16), stride_b*sizeof(libxsmm_bfloat16), NULL, NULL, NULL, &alpha, &beta, &flags, NULL); PCL_ASSERT(kernel, "Null brgemm bf16 kernel\n"); kernel(A, B, C, &l_br); } inline void brgemm_bf16_bf16(long n, long m, long k, long stride_b, long stride_a, libxsmm_bfloat16 *B_, libxsmm_bfloat16 *A_, libxsmm_bfloat16 *C, long count, const float beta = 1.0, const char b_trans='n', const char a_trans='n', const char b_vnni='n', const char a_vnni='n') { const float alpha = 1.0; libxsmm_bfloat16 *A = A_; libxsmm_bfloat16 *B = B_; unsigned long long l_br = count; int flags = LIBXSMM_GEMM_VNNI_FLAGS('n', 'n', 'v', 'n'); // Query or JIT-generate reduction kernel; returns NULL if JIT is not supported (bf16 inputs, fp32-accumulate internally, bf16 outputs). 
* libxsmm_bmmfunction_reducebatch_strd kernel = libxsmm_bmmdispatch_reducebatch_strd(m, n, k, stride_a*sizeof(libxsmm_bfloat16), stride_b*sizeof(libxsmm_bfloat16), NULL, NULL, NULL, &alpha, &beta, &flags, NULL); PCL_ASSERT(kernel, "Null brgemm bf16 kernel\n"); kernel(A, B, C, &l_br); } inline void delbias_bf16_f32(int N, int M, int ldo, int ldi, libxsmm_meltw_unary_param *delbias_params) { libxsmm_meltwfunction_unary delbias_kernel = libxsmm_dispatch_meltw_unary(M, N, &ldi, &ldo, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD_NCNC_FORMAT); if (delbias_kernel == NULL ) { printf("Could not create bf16 delbias kernel.. bailing\n"); exit(-1); } delbias_kernel(delbias_params); } class MLPCell_BF16 { public: MLPCell_BF16(int N, int C, int K, int bn, int bc, int bk, bool bias, bool skip, int act, bool norm, float p, bool train) { pN = N; pC = C; pK = K; pbn = bn; pbc = bc; pbk = bk; pbias = bias; pskip = skip; pact = act; pnorm = norm; pp = p; ptrain = train; //printf("MLPCell: N = %d, C = %d, K = %d, bf = %d, bias = %d, skip = %d, act = %d, norm = %d, dropout prob = %.2f train = %d\n", N, C, K, bias, skip, act, norm, p, train); } std::vector<at::Tensor> fwd(std::vector<at::Tensor> inputs) { long bn = pbn; long bc = pbc; long bk = pbk; long nn = pN/bn; long nc = pC; long nk = pK; long rn = pN % bn; long in_off = nn*nc*bn*bc; long out_off = nn*nk*bn*bk; long C = nc*bc; long K = nk*bk; // std::cout << "BF16--------------> " << std::endl; long bcp = (bc % 2 != 0) ? 
(bc + 1): bc; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param cvt_params; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_flags dropout_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT; libxsmm_meltw_binary_param add_params; libxsmm_meltw_binary_flags binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; libxsmm_meltw_binary_type binary_type = LIBXSMM_MELTW_TYPE_BINARY_ADD; int i=0; at::Tensor t_input_l = inputs[i++]; at::Tensor t_input_r = inputs[i++]; at::Tensor t_weights_l = inputs[i++]; at::Tensor t_weights_r = inputs[i++]; at::Tensor t_bias_l = inputs[i++]; at::Tensor t_bias_r = inputs[i++]; at::Tensor t_output = t_input_l.new_empty({pN, K}); int dd = (bk % 32 == 0) ? bk/32 : bk/32 + 1; at::Tensor t_dropout_mask_bN, t_dropout_mask_rN; if(ptrain && pp > 0) { int size = nn*nk*bn*bk; t_dropout_mask_bN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); if(rn > 0) { size = nk*rn*bk; t_dropout_mask_rN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); } } __mmask32 (*dropout_mask_bN)[nk][bn][dd] = (ptrain && pp > 0) ? (__mmask32 (*)[nk][bn][dd])(t_dropout_mask_bN.data_ptr()) : NULL; __mmask32 (*dropout_mask_rN)[nk][rn][dd] = (ptrain && pp > 0 && rn > 0) ? (__mmask32 (*)[nk][rn][dd])(t_dropout_mask_rN.data_ptr()) : NULL; int rd = (bk % 32 == 0) ? bk/32 : bk/32 + 1; at::Tensor t_relumask_bN, t_relumask_rN; if(pact==1) { int size = nn*nk*bn*bk; t_relumask_bN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); if(rn > 0) { size = nk*rn*bk; t_relumask_rN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); } } __mmask32 (*relumask_bN)[nk][bn][rd] = pact==1 ? (__mmask32 (*)[nk][bn][rd])(t_relumask_bN.data_ptr()) : NULL; __mmask32 (*relumask_rN)[nk][rn][rd] = (pact==1 && rn > 0) ? 
(__mmask32 (*)[nk][rn][rd])(t_relumask_rN.data_ptr()) : NULL; int threads = 1; #ifdef _OPENMP threads = omp_get_max_threads(); #endif long wts = nk*nc*bk*bcp; long in_bn = threads*nc*bn*bc; long in_rn = nc*rn*bc; long out_bn = threads*nk*bn*bk; long out_rn = nk*rn*bk; long scratch_size; if(pskip) scratch_size = (wts*6 + in_bn*2 + in_rn*2 + out_bn*3 + out_rn*3 + K*2)*sizeof(libxsmm_bfloat16); else scratch_size = (wts*3 + in_bn + in_rn + out_bn + out_rn + K)*sizeof(libxsmm_bfloat16); void *scratch = libxsmm_aligned_malloc(scratch_size, 2097152); libxsmm_bfloat16 *t_bf16_weights_l = (libxsmm_bfloat16*)scratch; libxsmm_bfloat16 *t_tr_weights_l = t_bf16_weights_l + wts; libxsmm_bfloat16 *t_vnni_weights_l = t_tr_weights_l + wts; libxsmm_bfloat16 *t_input_bN_l = t_vnni_weights_l + wts; libxsmm_bfloat16 *t_output_bN_l = t_input_bN_l + in_bn; libxsmm_bfloat16 *t_output_bN = t_output_bN_l + out_bn; libxsmm_bfloat16 *t_bf16_bias_l = t_output_bN + out_bn; libxsmm_bfloat16 *t_input_rN_l=NULL, *t_output_rN_l=NULL, *t_output_rN=NULL; if(rn > 0) { t_input_rN_l = t_bf16_bias_l + K; t_output_rN_l = t_input_rN_l + in_rn; t_output_rN = t_output_rN_l + out_rn; } libxsmm_bfloat16 *t_bf16_weights_r=NULL, *t_tr_weights_r=NULL, *t_vnni_weights_r=NULL, *t_input_bN_r=NULL, *t_output_bN_r=NULL, *t_bf16_bias_r=NULL; libxsmm_bfloat16 *t_input_rN_r=NULL, *t_output_rN_r=NULL; if(pskip) { if(rn > 0) t_bf16_weights_r = t_output_rN + out_rn; else t_bf16_weights_r = t_bf16_bias_l + K; t_tr_weights_r = t_bf16_weights_r + wts; t_vnni_weights_r = t_tr_weights_r + wts; t_input_bN_r = t_vnni_weights_r + wts; t_output_bN_r = t_input_bN_r + in_bn; t_bf16_bias_r = t_output_bN_r + out_bn; if(rn > 0) { t_input_rN_r = t_bf16_bias_r + K; t_output_rN_r = t_input_rN_r + in_rn; } } DECL_VLA_PTR_PT(float, wt_f32_l, [C], t_weights_l); float *bias_f32_l = t_bias_l.data_ptr<float>(); float (*wt_f32_r)[C] = pskip ? (float (*)[C])t_weights_r.data_ptr<float>() : NULL; float *bias_f32_r = pskip ? 
t_bias_r.data_ptr<float>() : NULL; DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, input_l, [C], t_input_l); DECL_VLA_PTR_NPT_CHECK_COND(pskip, libxsmm_bfloat16, at::BFloat16, input_r, [C], t_input_r); DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, output, [K], t_output); DECL_VLA_PTR(libxsmm_bfloat16, wt_l, [nc][bk][bc], t_bf16_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, tr_wt_l, [nc][bcp][bk], t_tr_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, vnni_wt_l, [nc][bcp/2][bk][2], t_vnni_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, input_bN_l, [nc][bn][bc], t_input_bN_l); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, input_rN_l, [nc][rn][bc], t_input_rN_l); DECL_VLA_PTR(libxsmm_bfloat16, output_bN, [nk][bn][bk], t_output_bN); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, output_rN, [nk][rn][bk], t_output_rN); DECL_VLA_PTR(libxsmm_bfloat16, bias_l, [bk], t_bf16_bias_l); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, wt_r, [nc][bk][bc], t_bf16_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, tr_wt_r, [nc][bcp][bk], t_tr_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, vnni_wt_r, [nc][bcp/2][bk][2], t_vnni_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_bN_r, [nc][bn][bc], t_input_bN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_rN_r, [nc][rn][bc], t_input_rN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, output_bN_l, [nk][bn][bk], t_output_bN_l); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, output_bN_r, [nk][bn][bk], t_output_bN_r); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, output_rN_l, [nk][rn][bk], t_output_rN_l); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, output_rN_r, [nk][rn][bk], t_output_rN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, bias_r, [bk], t_bf16_bias_r); // Get BF16 copy of weights for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { cvt_params.in.primary = &wt_f32_l[k*bk][c*bc]; cvt_params.out.primary = &wt_l[k][c]; cvt_f32_bf16(bk, bc, &cvt_params); } } 
if(pskip) { for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { cvt_params.in.primary = &wt_f32_r[k*bk][c*bc]; cvt_params.out.primary = &wt_r[k][c]; cvt_f32_bf16(bk, bc, &cvt_params); } } } for(int k=0; k<nk; k++) { cvt_params.in.primary = bias_f32_l; cvt_params.out.primary = &bias_l[k]; cvt_f32_bf16(nk, bk, &cvt_params); } if(pskip) { for(int k=0; k<nk; k++) { cvt_params.in.primary = bias_f32_r; cvt_params.out.primary = &bias_r[k]; cvt_f32_bf16(nk, bk, &cvt_params); } } // Wt: NORM layout to VNNI norm_to_normT_16b(wt_l[0][0][0], tr_wt_l[0][0][0], bk, bcp); norm_to_vnni_16b(tr_wt_l[0][0][0], vnni_wt_l[0][0][0][0], bcp, bk); if(pskip) { norm_to_normT_16b(wt_r[0][0][0], tr_wt_r[0][0][0], bk, bcp); norm_to_vnni_16b(tr_wt_r[0][0][0], vnni_wt_r[0][0][0][0], bcp, bk); } #ifdef _OPENMP #pragma omp parallel #endif { int tid = omp_get_thread_num(); int threads = omp_get_max_threads(); int jobs = (nn % threads == 0) ? nn/threads : nn/threads + 1; int tb = (tid*jobs < nn) ? tid*jobs : nn; int te = ((tid+1)*jobs < nn) ? 
(tid+1)*jobs : nn; int count = nc; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_binary_param add_params; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_flags dropout_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT; for(int m=tb; m<te; m++) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[m*bn][c*bc]; copy_params.out.primary = &input_bN_l[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } if(pskip) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_r[m*bn][c*bc]; copy_params.out.primary = &input_bN_r[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = &bias_l[k][0]; copy_params.out.primary = &output_bN_l[tid][k]; colbcast_bf16_copy(bn, bk, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = &bias_r[k][0]; copy_params.out.primary = &output_bN_r[tid][k]; colbcast_bf16_copy(bn, bk, &copy_params); } } else { for(int k=0; k<nk; k++) { copy_params.in.primary = &bias_l[k][0]; copy_params.out.primary = &output_bN[tid][k]; colbcast_bf16_copy(bn, bk, &copy_params); } } if(pskip) { brgemm_bf16_bf16(bn, bk, bcp, bn*bk, 0, input_bN_l[tid][0][0], vnni_wt_l[0][0][0][0], output_bN_l[tid][0][0], count); brgemm_bf16_bf16(bn, bk, bcp, bn*bk, 0, input_bN_r[tid][0][0], vnni_wt_r[0][0][0][0], output_bN_r[tid][0][0], count); add_params.in0.primary = (void*)&output_bN_l[tid][0]; add_params.in1.primary = (void*)&output_bN_r[tid][0]; add_params.out.primary = (void*)&output_bN[tid][0]; add_bf16_bf16(bn, bk, &add_params); } else brgemm_bf16_bf16(bn, bk, bcp, bn*bk, 0, input_bN_l[tid][0][0], vnni_wt_l[0][0][0][0], output_bN[tid][0][0], count); if(pact == 1) { for(int k=0; k<nk; k++) { relu_params.in.primary = &output_bN[tid][k]; relu_params.out.primary = &output_bN[tid][k]; relu_params.out.secondary = &relumask_bN[m][k]; relu_fwd_bf16(bn, bk, &relu_params); } } if(ptrain && pp > 0) { for(int k=0; k<nk; k++) { 
dropout_params.in.primary = &output_bN[tid][k]; dropout_params.in.tertiary = &pp; dropout_params.in.secondary = rnd_state; dropout_params.out.primary = &output_bN[tid][k]; dropout_params.out.secondary = &dropout_mask_bN[m][k]; dropout_bf16(bn, bk, &dropout_params, dropout_flags); } } for(int k=0; k<nk; k++) { copy_params.in.primary = &output_bN[tid][k]; copy_params.out.primary = &output[m*bn][k*bk]; bf16_copy(bn, bk, nk*bk, nk*bk, &copy_params); } } } if(rn > 0) { // Single-threaded part of compute // for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[nn*bn][c*bc]; copy_params.out.primary = &input_rN_l[0][c]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } if(pskip) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_r[nn*bn][c*bc]; copy_params.out.primary = &input_rN_r[0][c]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = bias_l; copy_params.out.primary = &output_rN_l[0][k]; colbcast_bf16_copy(rn, bk, &copy_params); copy_params.in.primary = bias_r; copy_params.out.primary = &output_rN_r[0][k]; colbcast_bf16_copy(rn, bk, &copy_params); } } else { for(int k=0; k<nk; k++) { copy_params.in.primary = bias_l; copy_params.out.primary = &output_rN[0][k]; colbcast_bf16_copy(rn, bk, &copy_params); } } int count = nc; if(pskip) { brgemm_bf16_bf16(rn, bk, bcp, rn*bk, 0, input_rN_l[0][0][0], vnni_wt_l[0][0][0][0], output_rN_l[0][0][0], count); brgemm_bf16_bf16(rn, bk, bcp, rn*bk, 0, input_rN_r[0][0][0], vnni_wt_r[0][0][0][0], output_rN_r[0][0][0], count); add_params.in0.primary = (void*)&output_rN_l[0][0]; add_params.in1.primary = (void*)&output_rN_r[0][0]; add_params.out.primary = (void*)&output_rN[0][0]; add_bf16_bf16(rn, bk, &add_params); } else brgemm_bf16_bf16(rn, bk, bcp, rn*bk, 0, input_rN_l[0][0][0], vnni_wt_l[0][0][0][0], output_rN[0][0][0], count); if(pact == 1) { for(int k=0; k<nk; k++) { relu_params.in.primary = &output_rN[0][k]; relu_params.out.primary = &output_rN[0][k]; 
          relu_params.out.secondary = &relumask_rN[0][k];
          relu_fwd_bf16(rn, bk, &relu_params);
        }
      }
      if(ptrain && pp > 0) {
        for(int k=0; k<nk; k++) {
          dropout_params.in.primary = &output_rN[0][k];
          dropout_params.in.secondary = rnd_state;
          dropout_params.in.tertiary = &pp;
          dropout_params.out.primary = &output_rN[0][k];
          dropout_params.out.secondary = &dropout_mask_rN[0][k];
          dropout_bf16(rn, bk, &dropout_params, dropout_flags);
        }
      }
      /* Scatter the remainder-rows output tile back into the flat [pN, K] output. */
      for(int k=0; k<nk; k++) {
        copy_params.in.primary = &output_rN[0][k];
        copy_params.out.primary = &output[nn*bn][k*bk];
        bf16_copy(rn, bk, nk*bk, nk*bk, &copy_params);
      }
    }
    libxsmm_free((void*)scratch);
    return {t_output, t_relumask_bN, t_relumask_rN, t_dropout_mask_bN, t_dropout_mask_rN};
  }

  ////=====================================================================================================================================================
  //// ====================== BackPass Function ===========================
  ////=====================================================================================================================================================
  // Backward pass of the (optionally skip-connected) MLP cell in bf16.
  //
  // inputs (in order): grad_output, input_l, input_r, weights_l, weights_r,
  //   relumask_bN, relumask_rN, dropout_mask_bN, dropout_mask_rN.
  // returns: {grad_input_l, grad_input_r, grad_weights_l, grad_weights_r,
  //   grad_bias_l, grad_bias_r} (the *_r tensors stay default-constructed when
  //   the skip branch is disabled).
  //
  // Pipeline per bn-row block: dropout-bwd and ReLU-bwd are applied in place on
  // grad_output, then grad_input = grad_output * W (VNNI-packed), grad_weights
  // accumulates grad_output^T * input via an fp32 BRGEMM, and grad_bias reduces
  // grad_output columns. The pN % bn remainder rows are handled single-threaded
  // at the end. All staging buffers live in one big aligned scratch allocation.
  std::vector<at::Tensor> bwd(std::vector<at::Tensor> inputs) {
    long bn = pbn; long bc = pbc; long bk = pbk;
    long nn = pN/bn; long nc = pC; long nk = pK;
    long rn = pN % bn;                       // remainder rows not covered by full bn blocks
    long K = nk*bk; long C = nc*bc;
    libxsmm_meltw_unary_param copy_params;
    libxsmm_meltw_unary_param relu_params;
    libxsmm_meltw_unary_param dropout_params;
    libxsmm_meltw_unary_param delbias_params;
    libxsmm_meltw_unary_param cvt_params;
    libxsmm_meltw_unary_flags dropout_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT;
    int threads = 1;
#ifdef _OPENMP
    threads = omp_get_max_threads();
#endif
    // ---------------- zero Padding to handle brgemm reduction -------------
    // VNNI packing pairs rows, so odd block sizes are padded up to even.
    long bnp = (bn % 2 != 0) ? (bn + 1): bn;
    long rnp = (rn % 2 != 0) ? (rn + 1): rn;
    long bkp = (bk % 2 != 0) ? (bk + 1): bk;
    // ----------------------------------------------------------------------
    int i=0;
    at::Tensor t_grad_output = inputs[i++];
    at::Tensor t_input_l = inputs[i++];
    at::Tensor t_input_r = inputs[i++];
    at::Tensor t_weights_l = inputs[i++];
    at::Tensor t_weights_r = inputs[i++];
    at::Tensor t_relumask_bN = inputs[i++];
    at::Tensor t_relumask_rN = inputs[i++];
    at::Tensor t_dropout_mask_bN = inputs[i++];
    at::Tensor t_dropout_mask_rN = inputs[i++];
    at::Tensor t_grad_weights_l = t_weights_l.new_empty({nk, nc, bk, bc});
    at::Tensor t_grad_bias_l = t_weights_l.new_empty(K);
    at::Tensor t_grad_input_l = t_input_l.new_empty({pN, C});
    at::Tensor t_grad_weights_r, t_grad_bias_r, t_grad_input_r;
    if(pskip) {
      t_grad_weights_r = t_weights_r.new_empty({nk, nc, bk, bc});
      t_grad_bias_r = t_weights_r.new_empty(K);
      t_grad_input_r = t_input_r.new_empty({pN, C});
    }
    // Element counts for each region of the shared scratch allocation below.
    long wts = nk*nc*bkp*bc;
    long go_bn_k = threads*nk*bn*bkp;
    long go_rn_k = nk*rn*bkp;
    long go_bn_n = threads*nk*bnp*bk;
    long go_rn_n = nk*rnp*bk;
    long gi_bn = threads*nc*bn*bc;
    long gi_rn = nc*rn*bc;
    long tr_go_bn = threads*nk*bnp*bk;
    long tr_go_rn = nk*rnp*bk;
    long in_v_bn = threads*nc*bnp*bc;
    long in_v_rn = nc*rnp*bc;
    long in_bn = threads*nc*bn*bc;
    long in_rn = nc*rn*bc;
    long scratch_size;
    if(pskip)
      scratch_size = (wts*4 + go_bn_k + go_rn_k + go_bn_n + go_rn_n + gi_bn*2 + gi_rn*2 + tr_go_bn + tr_go_rn + in_v_bn*2 + in_v_rn*2 + in_bn*2 + in_rn*2)*sizeof(libxsmm_bfloat16) + (nk*nc*bk*bc*2)*sizeof(float);
    else
      scratch_size = (wts*2 + go_bn_k + go_rn_k + go_bn_n + go_rn_n + gi_bn + gi_rn + tr_go_bn + tr_go_rn + in_v_bn + in_v_rn + in_bn + in_rn)*sizeof(libxsmm_bfloat16) + (nk*nc*bk*bc)*sizeof(float);
    void *scratch = libxsmm_aligned_malloc(scratch_size, 2097152);
    // Carve the scratch into typed sub-buffers; the order here must mirror the
    // size computation above exactly.
    libxsmm_bfloat16* t_grad_output_bN_K = (libxsmm_bfloat16*)scratch;
    libxsmm_bfloat16* t_grad_output_bN_N = t_grad_output_bN_K + go_bn_k;
    libxsmm_bfloat16* t_tr_grad_output_bN = t_grad_output_bN_N + go_bn_n;
    libxsmm_bfloat16* t_input_vnni_bN_l = t_tr_grad_output_bN + tr_go_bn;
    libxsmm_bfloat16* t_grad_input_bN_l = t_input_vnni_bN_l + in_v_bn;
    libxsmm_bfloat16* t_input_bN_l = t_grad_input_bN_l + gi_bn;
    libxsmm_bfloat16* t_vnni_weights_l = t_input_bN_l + in_bn;
    libxsmm_bfloat16* t_bf16_weights_l = t_vnni_weights_l + wts;
    float* t_f32_grad_wt_l = (float*)(t_bf16_weights_l + wts);
    libxsmm_bfloat16 *t_grad_output_rN_K=NULL, *t_grad_output_rN_N=NULL, *t_tr_grad_output_rN=NULL, *t_input_vnni_rN_l=NULL, *t_grad_input_rN_l=NULL;
    libxsmm_bfloat16 *t_input_rN_l=NULL;
    if(rn > 0) {
      t_grad_output_rN_K = (libxsmm_bfloat16*)(t_f32_grad_wt_l + wts);
      t_grad_output_rN_N = t_grad_output_rN_K + go_rn_k;
      t_tr_grad_output_rN = t_grad_output_rN_N + go_rn_n;
      t_input_vnni_rN_l = t_tr_grad_output_rN + tr_go_rn;
      t_grad_input_rN_l = t_input_vnni_rN_l + in_v_rn;
      t_input_rN_l = t_grad_input_rN_l + gi_rn;
    }
    libxsmm_bfloat16* t_input_vnni_bN_r=NULL, *t_grad_input_bN_r=NULL, *t_input_bN_r=NULL;
    libxsmm_bfloat16* t_vnni_weights_r=NULL, *t_bf16_weights_r=NULL, *t_input_vnni_rN_r=NULL, *t_grad_input_rN_r=NULL, *t_input_rN_r=NULL;
    float *t_f32_grad_wt_r=NULL;
    if(pskip) {
      if(rn > 0)
        t_input_vnni_bN_r = t_input_rN_l + in_rn;
      else
        t_input_vnni_bN_r = (libxsmm_bfloat16*)(t_f32_grad_wt_l + wts);
      t_grad_input_bN_r = t_input_vnni_bN_r + in_v_bn;
      t_input_bN_r = t_grad_input_bN_r + gi_bn;
      t_vnni_weights_r = t_input_bN_r + in_bn;
      t_bf16_weights_r = t_vnni_weights_r + wts;
      t_f32_grad_wt_r = (float*)(t_bf16_weights_r + wts);
      if(rn > 0) {
        t_input_vnni_rN_r = (libxsmm_bfloat16*)(t_f32_grad_wt_r + wts);
        t_grad_input_rN_r = t_input_vnni_rN_r + in_v_rn;
        t_input_rN_r = t_grad_input_rN_r + gi_rn;
      }
    }
    // Typed multi-dimensional views over the tensors and scratch buffers.
    DECL_VLA_PTR_PT(float, wt_f32_l, [C], t_weights_l);
    DECL_VLA_PTR_PT(float, grad_wt_l, [C], t_grad_weights_l);
    float (*wt_f32_r)[C] = pskip ? (float (*)[C])t_weights_r.data_ptr<float>() : NULL;
    float (*grad_wt_r)[C] = pskip ? (float (*)[C])t_grad_weights_r.data_ptr<float>() : NULL;
    DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, grad_output, [K], t_grad_output);
    DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, input_l, [C], t_input_l);
    DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, grad_input_l, [C], t_grad_input_l);
    DECL_VLA_PTR_NPT_CHECK_COND(pskip, libxsmm_bfloat16, at::BFloat16, input_r, [C], t_input_r);
    DECL_VLA_PTR_NPT_CHECK_COND(pskip, libxsmm_bfloat16, at::BFloat16, grad_input_r, [C], t_grad_input_r);
    DECL_VLA_PTR(libxsmm_bfloat16, grad_output_bN_K, [nk][bn][bkp], t_grad_output_bN_K);
    DECL_VLA_PTR(libxsmm_bfloat16, grad_output_bN_N, [nk][bnp][bk], t_grad_output_bN_N);
    DECL_VLA_PTR(libxsmm_bfloat16, tr_grad_output_bN, [nk][bk][bnp], t_tr_grad_output_bN);
    DECL_VLA_PTR(libxsmm_bfloat16, input_vnni_bN_l, [nc][bnp/2][bc][2], t_input_vnni_bN_l);
    DECL_VLA_PTR(libxsmm_bfloat16, grad_input_bN_l, [nc][bn][bc], t_grad_input_bN_l);
    DECL_VLA_PTR(libxsmm_bfloat16, input_bN_l, [nc][bn][bc], t_input_bN_l);
    DECL_VLA_PTR(libxsmm_bfloat16, vnni_wt_l, [nc][bkp/2][bc][2], t_vnni_weights_l);
    DECL_VLA_PTR(libxsmm_bfloat16, wt_l, [nc][bk][bc], t_bf16_weights_l);
    DECL_VLA_PTR(float, grad_wt_f32_l, [nc][bk][bc], t_f32_grad_wt_l);
    float *grad_bias_l = t_grad_bias_l.data_ptr<float>();
    DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_vnni_bN_r, [nc][bnp/2][bc][2], t_input_vnni_bN_r);
    DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, grad_input_bN_r, [nc][bn][bc], t_grad_input_bN_r);
    DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_bN_r, [nc][bn][bc], t_input_bN_r);
    DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, vnni_wt_r, [nc][bkp/2][bc][2], t_vnni_weights_r);
    DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, wt_r, [nc][bk][bc], t_bf16_weights_r);
    DECL_VLA_PTR_CHECK_COND(pskip, float, grad_wt_f32_r, [nc][bk][bc], t_f32_grad_wt_r);
    float *grad_bias_r = pskip ? t_grad_bias_r.data_ptr<float>() : NULL;
    DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, grad_output_rN_K, [nk][rn][bkp], t_grad_output_rN_K);
    DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, grad_output_rN_N, [nk][rnp][bk], t_grad_output_rN_N);
    DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, tr_grad_output_rN, [nk][bk][rnp], t_tr_grad_output_rN);
    DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, input_vnni_rN_l, [nc][rnp/2][bc][2], t_input_vnni_rN_l);
    DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, grad_input_rN_l, [nc][rn][bc], t_grad_input_rN_l);
    DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, input_rN_l, [nc][rn][bc], t_input_rN_l);
    DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, input_vnni_rN_r, [nc][rnp/2][bc][2], t_input_vnni_rN_r);
    DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, grad_input_rN_r, [nc][rn][bc], t_grad_input_rN_r);
    DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, input_rN_r, [nc][rn][bc], t_input_rN_r);
    // Mask words per bk columns (32 mask bits per __mmask32 word).
    int dd = (bk % 32 == 0) ? bk/32 : bk/32 + 1;
    int rd = (bk % 32 == 0) ? bk/32 : bk/32 + 1;
    __mmask32 (*dropout_mask_bN)[nk][bn][dd] = (ptrain && pp > 0) ? (__mmask32 (*)[nk][bn][dd])(t_dropout_mask_bN.data_ptr()) : NULL;
    __mmask32 (*dropout_mask_rN)[nk][rn][dd] = (ptrain && pp > 0 && rn > 0) ? (__mmask32 (*)[nk][rn][dd])(t_dropout_mask_rN.data_ptr()) : NULL;
    __mmask32 (*relumask_bN)[nk][bn][rd] = pact==1 ? (__mmask32 (*)[nk][bn][rd])(t_relumask_bN.data_ptr()) : NULL;
    __mmask32 (*relumask_rN)[nk][rn][rd] = (pact==1 && rn > 0) ? (__mmask32 (*)[nk][rn][rd])(t_relumask_rN.data_ptr()) : NULL;
    // Zero the accumulators (fp32 weight-gradient scratch, outputs, biases).
    copy_params.out.primary = t_f32_grad_wt_l;
    zero(K*C, &copy_params);
    copy_params.out.primary = t_grad_weights_l.data_ptr<float>();
    zero(K*C, &copy_params);
    copy_params.out.primary = t_grad_bias_l.data_ptr<float>();
    zero(K, &copy_params);
    if(pskip) {
      copy_params.out.primary = t_f32_grad_wt_r;
      zero(K*C, &copy_params);
    }
    if(pskip) {
      copy_params.out.primary = t_grad_weights_r.data_ptr<float>();
      zero(K*C, &copy_params);
      copy_params.out.primary = t_grad_bias_r.data_ptr<float>();
      zero(K, &copy_params);
    }
    // Get BF16 copy of weights
    for(int k=0; k<nk; k++) {
      for(int c=0; c<nc; c++) {
        cvt_params.in.primary = &wt_f32_l[k*bk][c*bc];
        cvt_params.out.primary = &wt_l[k][c];
        cvt_f32_bf16(bk, bc, &cvt_params);
      }
    }
    // NOTE(review): `count` is declared outside the OpenMP region below, so it
    // is shared between threads; the `count = 1;` writes inside the parallel
    // loops race on it, and from the second m-iteration on the grad-input
    // BRGEMM sees count==1 instead of nk. Looks like it was meant to be a
    // per-thread (or per-call) value — verify against the fwd() convention.
    int count = nk;
    norm_to_vnni_16b(wt_l[0][0][0], vnni_wt_l[0][0][0][0], bkp, bc); //bk x bc --> bkp/2 x bc x 2
    if(pskip) {
      for(int k=0; k<nk; k++) {
        for(int c=0; c<nc; c++) {
          cvt_params.in.primary = &wt_f32_r[k*bk][c*bc];
          cvt_params.out.primary = &wt_r[k][c];
          cvt_f32_bf16(bk, bc, &cvt_params);
        }
      }
      int count = nk;
      norm_to_vnni_16b(wt_r[0][0][0], vnni_wt_r[0][0][0][0], bkp, bc); //bk x bc --> bkp/2 x bc x 2
    }
    if(pskip) {
#ifdef _OPENMP
#pragma omp parallel reduction(+: grad_wt_f32_l[:nk][:nc][:bk][:bc], grad_bias_l[:K], grad_wt_f32_r[:nk][:nc][:bk][:bc], grad_bias_r[:K])
#endif
      {
        // Static partition of the nn row-blocks across threads.
        int tid = omp_get_thread_num();
        int threads = omp_get_max_threads();
        int jobs = (nn % threads == 0) ? nn/threads : nn/threads + 1;
        int tb = (tid*jobs < nn) ? tid*jobs : nn;
        int te = ((tid+1)*jobs < nn) ? (tid+1)*jobs : nn;
        libxsmm_meltw_unary_param relu_params;
        libxsmm_meltw_unary_param dropout_params;
        libxsmm_meltw_unary_param copy_params;
        libxsmm_meltw_unary_param delbias_params;
        for(int m=tb; m<te; m++) {
          // Apply dropout-bwd / ReLU-bwd in place on grad_output, then stage it
          // (bkp-padded) for the grad-input BRGEMM.
          for(int k=0; k<nk; k++) {
            if(ptrain && pp > 0) {
              dropout_params.in.primary = &grad_output[m*bn][k*bk];
              dropout_params.in.secondary = &dropout_mask_bN[m][k][0][0];
              dropout_params.in.tertiary = &pp;
              dropout_params.out.primary = &grad_output[m*bn][k*bk];
              dropout_bwd_bf16(bn, bk, &dropout_params, dropout_flags);
            }
            if(pact == 1) {
              relu_params.in.primary = &grad_output[m*bn][k*bk];
              relu_params.in.secondary = &relumask_bN[m][k][0][0];
              relu_params.out.primary = &grad_output[m*bn][k*bk];
              relu_bwd_bf16(bn, bk, &relu_params);
            }
            copy_params.in.primary = &grad_output[m*bn][k*bk];
            copy_params.out.primary = &grad_output_bN_K[tid][k];
            bf16_copy(bn, bk, nk*bkp, nk*bk, &copy_params);
          }
          brgemm_bf16_bf16(bn, bc, bkp, bn*bkp, 0, grad_output_bN_K[tid][0][0], vnni_wt_l[0][0][0][0], grad_input_bN_l[tid][0][0], count, 0.0);
          for(int c=0; c<nc; c++) {
            copy_params.in.primary = &grad_input_bN_l[tid][c];
            copy_params.out.primary = &grad_input_l[m*bn][c*bc];
            bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params);
          }
          brgemm_bf16_bf16(bn, bc, bkp, bn*bkp, 0, grad_output_bN_K[tid][0][0], vnni_wt_r[0][0][0][0], grad_input_bN_r[tid][0][0], count, 0.0);
          for(int c=0; c<nc; c++) {
            copy_params.in.primary = &grad_input_bN_r[tid][c];
            copy_params.out.primary = &grad_input_r[m*bn][c*bc];
            bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params);
          }
          // grad-weights: transpose grad_output and VNNI-pack the inputs, then
          // accumulate grad_output^T * input into the fp32 reduction buffers.
          for(int k=0; k<nk; k++) {
            copy_params.in.primary = &grad_output[m*bn][k*bk];
            copy_params.out.primary = &grad_output_bN_N[tid][k];
            bf16_copy(bnp, bk, nk*bk, nk*bk, &copy_params);
            norm_to_normT_16b(grad_output_bN_N[tid][k][0], tr_grad_output_bN[tid][k][0], bnp, bk);
          }
          for(int c=0; c<nc; c++) {
            copy_params.in.primary = &input_l[m*bn][c*bc];
            copy_params.out.primary = &input_bN_l[tid][c];
            bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params);
            norm_to_vnni_16b(input_bN_l[tid][c][0], input_vnni_bN_l[tid][c][0][0], bnp, bc);
          }
          count = 1;
          brgemm_bf16_f32(bk, bc, bnp, bnp*bk, bnp*bc, tr_grad_output_bN[tid][0][0], input_vnni_bN_l[tid][0][0][0], grad_wt_f32_l[0][0][0], count, 1.0);
          for(int c=0; c<nc; c++) {
            copy_params.in.primary = &input_r[m*bn][c*bc];
            copy_params.out.primary = &input_bN_r[tid][c];
            bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params);
            norm_to_vnni_16b(input_bN_r[tid][c][0], input_vnni_bN_r[tid][c][0][0], bnp, bc);
          }
          count = 1;
          brgemm_bf16_f32(bk, bc, bnp, bnp*bk, bnp*bc, tr_grad_output_bN[tid][0][0], input_vnni_bN_r[tid][0][0][0], grad_wt_f32_r[0][0][0], count, 1.0);
          // Both branches see the same grad_output, so the right-hand bias
          // gradient is simply a copy of the left-hand one.
          for(int k=0; k<nk; k++) {
            delbias_params.in.primary = &grad_output_bN_N[tid][k];
            delbias_params.out.primary = grad_bias_l;
            delbias_bf16_f32(bn, bk, bn, bk, &delbias_params);
          }
          copy_params.in.primary = grad_bias_l;
          copy_params.out.primary = grad_bias_r;
          f32_copy(1, K, K, K, &copy_params);
        }
      }
    }
    else {
#ifdef _OPENMP
#pragma omp parallel reduction(+: grad_wt_f32_l[:nk][:nc][:bk][:bc], grad_bias_l[:K])
#endif
      {
        // Same pipeline as the pskip branch above, minus the right-hand side.
        int tid = omp_get_thread_num();
        int threads = omp_get_max_threads();
        int jobs = (nn % threads == 0) ? nn/threads : nn/threads + 1;
        int tb = (tid*jobs < nn) ? tid*jobs : nn;
        int te = ((tid+1)*jobs < nn) ? (tid+1)*jobs : nn;
        libxsmm_meltw_unary_param relu_params;
        libxsmm_meltw_unary_param dropout_params;
        libxsmm_meltw_unary_param copy_params;
        libxsmm_meltw_unary_param delbias_params;
        for(int m=tb; m<te; m++) {
          for(int k=0; k<nk; k++) {
            if(ptrain && pp > 0) {
              dropout_params.in.primary = &grad_output[m*bn][k*bk];
              dropout_params.in.secondary = &dropout_mask_bN[m][k][0][0];
              dropout_params.in.tertiary = &pp;
              dropout_params.out.primary = &grad_output[m*bn][k*bk];
              dropout_bwd_bf16(bn, bk, &dropout_params, dropout_flags);
            }
            if(pact == 1) {
              relu_params.in.primary = &grad_output[m*bn][k*bk];
              relu_params.in.secondary = &relumask_bN[m][k][0][0];
              relu_params.out.primary = &grad_output[m*bn][k*bk];
              relu_bwd_bf16(bn, bk, &relu_params);
            }
            copy_params.in.primary = &grad_output[m*bn][k*bk];
            copy_params.out.primary = &grad_output_bN_K[tid][k];
            bf16_copy(bn, bk, nk*bkp, nk*bk, &copy_params);
          }
          brgemm_bf16_bf16(bn, bc, bkp, bn*bkp, 0, grad_output_bN_K[tid][0][0], vnni_wt_l[0][0][0][0], grad_input_bN_l[tid][0][0], count, 0.0);
          for(int c=0; c<nc; c++) {
            copy_params.in.primary = &grad_input_bN_l[tid][c];
            copy_params.out.primary = &grad_input_l[m*bn][c*bc];
            bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params);
          }
          for(int k=0; k<nk; k++) {
            copy_params.in.primary = &grad_output[m*bn][k*bk];
            copy_params.out.primary = &grad_output_bN_N[tid][k];
            bf16_copy(bnp, bk, nk*bk, nk*bk, &copy_params);
            norm_to_normT_16b(grad_output_bN_N[tid][k][0], tr_grad_output_bN[tid][k][0], bnp, bk);
          }
          for(int c=0; c<nc; c++) {
            copy_params.in.primary = &input_l[m*bn][c*bc];
            copy_params.out.primary = &input_bN_l[tid][c];
            bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params);
            norm_to_vnni_16b(input_bN_l[tid][c][0], input_vnni_bN_l[tid][c][0][0], bnp, bc);
          }
          count = 1;
          brgemm_bf16_f32(bk, bc, bnp, bnp*bk, bnp*bc, tr_grad_output_bN[tid][0][0], input_vnni_bN_l[tid][0][0][0], grad_wt_f32_l[0][0][0], count, 1.0);
          for(int k=0; k<nk; k++) {
            delbias_params.in.primary = &grad_output_bN_N[tid][k];
            delbias_params.out.primary = grad_bias_l;
            delbias_bf16_f32(bn, bk, bn, bk, &delbias_params);
          }
        }
      }
    }
    if(rn > 0) {
      //Single-thread portion of code--------------------------
      // NOTE(review): the BRGEMMs below reuse `count`, which is 1 after the
      // parallel region ran (or nk when nn==0) — confirm the intended batch
      // count for the remainder rows.
      // Dropout
      if(ptrain && pp > 0) {
        for(int k=0; k<nk; k++) {
          dropout_params.in.primary = &grad_output[nn*bn][k*bk];
          dropout_params.in.secondary = &dropout_mask_rN[0][k][0][0];
          dropout_params.in.tertiary = &pp;
          dropout_params.out.primary = &grad_output[nn*bn][k*bk];
          dropout_bwd_bf16(rn, bk, &dropout_params, dropout_flags);
        }
      }
      // ReLU
      if(pact == 1) {
        for(int k=0; k<nk; k++) {
          relu_params.in.primary = &grad_output[nn*bn][k*bk];
          relu_params.in.secondary = &relumask_rN[0][k][0][0];
          relu_params.out.primary = &grad_output[nn*bn][k*bk];
          relu_bwd_bf16(rn, bk, &relu_params);
        }
      }
      //grad-input
      for(int k=0; k<nk; k++) {
        copy_params.in.primary = &grad_output[nn*bn][k*bk];
        copy_params.out.primary = &grad_output_rN_K[0][k];
        bf16_copy(rn, bk, nk*bkp, nk*bk, &copy_params);
      }
      brgemm_bf16_bf16(rn, bc, bkp, rn*bkp, 0, grad_output_rN_K[0][0][0], vnni_wt_l[0][0][0][0], grad_input_rN_l[0][0][0], count, 0.0);
      for(int c=0; c<nc; c++) {
        copy_params.in.primary = &grad_input_rN_l[0][c];
        copy_params.out.primary = &grad_input_l[nn*bn][c*bc];
        bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params);
      }
      if(pskip) {
        brgemm_bf16_bf16(rn, bc, bkp, rn*bkp, 0, grad_output_rN_K[0][0][0], vnni_wt_r[0][0][0][0], grad_input_rN_r[0][0][0], count, 0.0);
        for(int c=0; c<nc; c++) {
          copy_params.in.primary = &grad_input_rN_r[0][c];
          copy_params.out.primary = &grad_input_r[nn*bn][c*bc];
          bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params);
        }
      }
      //grad-weights
      for(int k=0; k<nk; k++) {
        copy_params.in.primary = &grad_output[nn*bn][k*bk];
        copy_params.out.primary = &grad_output_rN_N[0][k];
        bf16_copy(rn, bk, nk*bk, nk*bk, &copy_params);
        norm_to_normT_16b(grad_output_rN_N[0][k][0], tr_grad_output_rN[0][k][0], rnp, bk);
      }
      for(int c=0; c<nc; c++) {
        copy_params.in.primary = &input_l[nn*bn][c*bc];
        copy_params.out.primary = &input_rN_l[0][c];
        bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params);
      }
      for(int c=0; c<nc; c++)
        norm_to_vnni_16b(input_rN_l[0][c][0], input_vnni_rN_l[0][c][0][0], rnp, bc);
      count = 1;
      brgemm_bf16_f32(bk, bc, rnp, rnp*bk, rnp*bc, tr_grad_output_rN[0][0][0], input_vnni_rN_l[0][0][0][0], grad_wt_f32_l[0][0][0], count, 1.0);
      if(pskip) {
        for(int c=0; c<nc; c++) {
          copy_params.in.primary = &input_r[nn*bn][c*bc];
          copy_params.out.primary = &input_rN_r[0][c];
          bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params);
        }
        for(int c=0; c<nc; c++)
          norm_to_vnni_16b(input_rN_r[0][c][0], input_vnni_rN_r[0][c][0][0], rnp, bc);
        count = 1;
        brgemm_bf16_f32(bk, bc, rnp, rnp*bk, rnp*bc, tr_grad_output_rN[0][0][0], input_vnni_rN_r[0][0][0][0], grad_wt_f32_r[0][0][0], count, 1.0);
      }
      for(int k=0; k<nk; k++) {
        delbias_params.in.primary = &grad_output_rN_N[0][k];
        delbias_params.out.primary = grad_bias_l;
        delbias_bf16_f32(rn, bk, rn, bk, &delbias_params);
      }
      if(pskip) {
        for(int k=0; k<nk; k++) {
          delbias_params.in.primary = &grad_output_rN_N[0][k];
          delbias_params.out.primary = grad_bias_r;
          delbias_bf16_f32(rn, bk, rn, bk, &delbias_params);
        }
      }
    }
    // Blocked fp32 weight-gradient scratch -> flat [K, C] output tensors.
    for(int k=0; k<nk; k++) {
      for(int c=0; c<nc; c++) {
        copy_params.in.primary = &grad_wt_f32_l[k][c];
        copy_params.out.primary = &grad_wt_l[k*bk][c*bc];
        f32_copy(bk, bc, nc*bc, nc*bc, &copy_params);
      }
    }
    if(pskip) {
      for(int k=0; k<nk; k++) {
        for(int c=0; c<nc; c++) {
          copy_params.in.primary = &grad_wt_f32_r[k][c];
          copy_params.out.primary = &grad_wt_r[k*bk][c*bc];
          f32_copy(bk, bc, nc*bc, nc*bc, &copy_params);
        }
      }
    }
    libxsmm_free(scratch);
    return {t_grad_input_l, t_grad_input_r, t_grad_weights_l, t_grad_weights_r, t_grad_bias_l, t_grad_bias_r};
  }

  // Configuration accessors.
  bool has_bias() {return pbias;}
  bool has_skip() {return pskip;}
  bool has_norm() {return pnorm;}

private:
  long pN;                 // total rows (batch)
  long pC;                 // number of input channel blocks
  long pK;                 // number of output channel blocks
  long pbn; long pbc; long pbk;  // block sizes for rows / input / output channels
  bool pbias; bool pskip;
  int pact;                // 1 enables ReLU
  bool pnorm;
  float pp;                // dropout probability
  bool ptrain;             // training mode (enables dropout)
};
#endif
mlpcell_bf16.h
#ifndef MLPCELL_BF16 #define MLPCELL_BF16 #include "mc_funcs.h" #define PCL_ASSERT(cond, x...) do { if(!(cond)) { printf(x); fflush(stdout); exit(1); } } while(0) #define DECL_VLA_PTR(type, name, dims, ptr) type (*name)dims = (type (*)dims)ptr #define DECL_VLA_PTR_CHECK_VAR(var, type, name, dims, ptr) type (*name)dims = (var > 0) ? (type (*)dims)ptr : NULL #define DECL_VLA_PTR_CHECK_COND(cond, type, name, dims, ptr) type (*name)dims = cond ? (type (*)dims)ptr : NULL #define DECL_VLA_PTR_CHECK_COND_VAR(cond, var, type, name, dims, ptr) type (*name)dims = (cond && var > 0) ? (type (*)dims)ptr : NULL #define DECL_VLA_PTR_PT(type, name, dims, t) type (*name)dims = (type (*)dims)(t.data_ptr<type>()) #define DECL_VLA_PTR_PT_CHECK_COND(cond, type, name, dims, t) type (*name)dims = cond ? (type (*)dims)(t.data_ptr<type>()) : NULL #define DECL_VLA_PTR_NPT(newtype, type, name, dims, t) newtype (*name)dims = (newtype (*)dims)(t.data_ptr<type>()) #define DECL_VLA_PTR_NPT_CHECK_COND(cond, newtype, type, name, dims, t) newtype (*name)dims = cond ? (newtype (*)dims)(t.data_ptr<type>()) : NULL #define LIBXSMM_ALIGNDOWN(N, A) ((N) & ~((A)-1)) //--------------------------------------norm_to_vnni----------------------------------------------------- // void norm_to_vnni_16b(libxsmm_bfloat16* in, libxsmm_bfloat16* out, int N, int M) { libxsmm_meltw_unary_param trans_param; libxsmm_meltw_unary_type trans_type; trans_param.in.primary = (void*)in; trans_param.out.primary = (void*)out; if ( N % 2 == 1 ) { trans_type = LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI_PAD; } else { trans_type = LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI; } libxsmm_meltwfunction_unary trans_kernel = libxsmm_dispatch_meltw_unary(M, N, (libxsmm_blasint*)&M, (libxsmm_blasint*)&M, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, trans_type); if ( trans_kernel == NULL ) { fprintf( stderr, "JIT for NORM_TO_VNNI TPP. 
Bailing...!\n"); exit(-1); } trans_kernel( &trans_param ); } //--------------------------------------norm_to_normT----------------------------------------------------- // void norm_to_normT_16b(libxsmm_bfloat16* in, libxsmm_bfloat16* out, int N, int M) { libxsmm_meltw_unary_param trans_param; libxsmm_meltw_unary_type trans_type; trans_param.in.primary = (void*)in; trans_param.out.primary = (void*)out; trans_type = LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT; libxsmm_meltwfunction_unary trans_kernel = libxsmm_dispatch_meltw_unary(M, N, (libxsmm_blasint*)&M, (libxsmm_blasint*)&N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, trans_type); if ( trans_kernel == NULL ) { fprintf( stderr, "JIT for NORM_TO_NORMT TPP. Bailing...!\n"); exit(-1); } trans_kernel( &trans_param ); } //--------------------------------------------------convert f32 to bf16 TPP------------------------------------- inline void cvt_f32_bf16(int N, int M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_meltwfunction_unary cvt_f32_bf16_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, unary_flags, unary_type ); PCL_ASSERT(cvt_f32_bf16_kernel, "Null cvt_f32_bf16 kernel"); cvt_f32_bf16_kernel(params); } inline void bf16_copy(int N, int M, int LDO, int LDI, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_datatype compute_dtype = LIBXSMM_DATATYPE_BF16; libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, N, (libxsmm_blasint*)&LDI, (libxsmm_blasint*)&LDO, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, compute_dtype, unary_flags, unary_type); if ( kernel == NULL ) { fprintf( stderr, "JIT 
for bf16 to b16 copy failed. Bailing...!\n"); exit(-1); } kernel(params); } inline void colbcast_bf16_copy(int N, int M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_datatype compute_dtype = LIBXSMM_DATATYPE_BF16; libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, compute_dtype, unary_flags, unary_type); if ( kernel == NULL ) { fprintf( stderr, "JIT for bf16 to b16 broadcast copy failed. Bailing...!\n"); exit(-1); } kernel(params); } inline void bf16_f32_copy(int N, int M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE; libxsmm_meltw_unary_type unary_type = LIBXSMM_MELTW_TYPE_UNARY_IDENTITY; libxsmm_datatype compute_dtype = LIBXSMM_DATATYPE_F32; libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, compute_dtype, unary_flags, unary_type); if ( kernel == NULL ) { fprintf( stderr, "JIT for bf16 to f32 copy failed. Bailing...!\n"); exit(-1); } kernel(params); } inline void add_bf16_bf16(int N, int M, libxsmm_meltw_binary_param *binary_param) { libxsmm_meltw_binary_flags binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; libxsmm_meltw_binary_type binary_type = LIBXSMM_MELTW_TYPE_BINARY_ADD; libxsmm_meltwfunction_binary add_kernel = libxsmm_dispatch_meltw_binary(M, N, (libxsmm_blasint*)&M, (libxsmm_blasint*)&M, (libxsmm_blasint*)&N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, binary_flags, binary_type); if ( add_kernel == NULL ) { fprintf( stderr, "JIT for BINARY TPP. 
Bailing...!\n"); exit(-1); } add_kernel(binary_param); } inline void relu_fwd_bf16(long N, long M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK; libxsmm_meltwfunction_unary relu_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, unary_flags, LIBXSMM_MELTW_TYPE_UNARY_RELU); if ( relu_kernel == NULL ) { fprintf( stderr, "JIT for ReLU TPP. Bailing...!\n"); exit(-1); } relu_kernel( params ); } inline void relu_bwd_bf16(long N, long M, libxsmm_meltw_unary_param *params) { libxsmm_meltw_unary_flags unary_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK; libxsmm_meltwfunction_unary relu_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, unary_flags, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV); if ( relu_kernel == NULL ) { fprintf( stderr, "JIT for ReLU TPP. Bailing...!\n"); exit(-1); } relu_kernel( params ); } inline void dropout_bf16(long N, long M, libxsmm_meltw_unary_param *params, libxsmm_meltw_unary_flags flags) { libxsmm_meltwfunction_unary dropout_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, flags, LIBXSMM_MELTW_TYPE_UNARY_DROPOUT); if ( dropout_kernel == NULL ) { fprintf( stderr, "JIT for DROPOUT TPP. Bailing...!\n"); exit(-1); } dropout_kernel( params ); } inline void dropout_bwd_bf16(long N, long M, libxsmm_meltw_unary_param *params, libxsmm_meltw_unary_flags flags) { libxsmm_meltwfunction_unary dropout_kernel = libxsmm_dispatch_meltw_unary(M, N, NULL, NULL, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, flags, LIBXSMM_MELTW_TYPE_UNARY_DROPOUT_INV); if ( dropout_kernel == NULL ) { fprintf( stderr, "JIT for DROPOUT TPP. 
Bailing...!\n"); exit(-1); } dropout_kernel( params ); }

// Batch-reduce GEMM, bf16 inputs with fp32 output/accumulation:
// C (n x m, fp32) = beta*C + sum over `count` batches of B_ * A_, where A_ is
// VNNI-packed bf16 and consecutive batch entries are stride_a/stride_b
// elements apart. The trailing b_trans/a_trans/b_vnni/a_vnni parameters are
// currently unused (kept for interface compatibility).
inline void brgemm_bf16_f32(long n, long m, long k, long stride_b, long stride_a, libxsmm_bfloat16 *B_, libxsmm_bfloat16 *A_, float *C, long count, const float beta = 1.0, const char b_trans='n', const char a_trans='n', const char b_vnni='n', const char a_vnni='n') {
  const float alpha = 1.0;
  libxsmm_bfloat16 *A = A_;
  libxsmm_bfloat16 *B = B_;
  unsigned long long l_br = count;
  int flags = LIBXSMM_GEMM_VNNI_FLAGS('n', 'n', 'v', 'n');
  // Query or JIT-generate the strided batch-reduce kernel; dispatch returns
  // NULL when JIT is not supported for this shape.
  libxsmm_bsmmfunction_reducebatch_strd kernel = libxsmm_bsmmdispatch_reducebatch_strd(m, n, k, stride_a*sizeof(libxsmm_bfloat16), stride_b*sizeof(libxsmm_bfloat16), NULL, NULL, NULL, &alpha, &beta, &flags, NULL);
  PCL_ASSERT(kernel, "Null brgemm bf16 kernel\n");
  kernel(A, B, C, &l_br);
}

// Same batch-reduce GEMM as above but with bf16 output
// (fp32 accumulation happens inside the kernel).
inline void brgemm_bf16_bf16(long n, long m, long k, long stride_b, long stride_a, libxsmm_bfloat16 *B_, libxsmm_bfloat16 *A_, libxsmm_bfloat16 *C, long count, const float beta = 1.0, const char b_trans='n', const char a_trans='n', const char b_vnni='n', const char a_vnni='n') {
  const float alpha = 1.0;
  libxsmm_bfloat16 *A = A_;
  libxsmm_bfloat16 *B = B_;
  unsigned long long l_br = count;
  int flags = LIBXSMM_GEMM_VNNI_FLAGS('n', 'n', 'v', 'n');
  // Query or JIT-generate the strided batch-reduce kernel; dispatch returns
  // NULL when JIT is not supported for this shape.
  libxsmm_bmmfunction_reducebatch_strd kernel = libxsmm_bmmdispatch_reducebatch_strd(m, n, k, stride_a*sizeof(libxsmm_bfloat16), stride_b*sizeof(libxsmm_bfloat16), NULL, NULL, NULL, &alpha, &beta, &flags, NULL);
  PCL_ASSERT(kernel, "Null brgemm bf16 kernel\n");
  kernel(A, B, C, &l_br);
}

// Bias gradient: column-reduce (add) an N x M bf16 tile in NCNC blocked
// format, accumulating into an fp32 vector.
inline void delbias_bf16_f32(int N, int M, int ldo, int ldi, libxsmm_meltw_unary_param *delbias_params) {
  libxsmm_meltwfunction_unary delbias_kernel = libxsmm_dispatch_meltw_unary(M, N, &ldi, &ldo, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD_NCNC_FORMAT);
  if (delbias_kernel == NULL ) { printf("Could not create bf16 delbias kernel.. bailing\n"); exit(-1); }
  delbias_kernel(delbias_params);
}

// bf16 MLP cell: linear layer (optionally two-input with a skip/right branch)
// plus optional bias, ReLU activation (act==1) and dropout (p > 0, training).
// N/C/K are row count and channel-block counts; bn/bc/bk are the block sizes.
class MLPCell_BF16 {
public:
  MLPCell_BF16(int N, int C, int K, int bn, int bc, int bk, bool bias, bool skip, int act, bool norm, float p, bool train) {
    pN = N; pC = C; pK = K;
    pbn = bn; pbc = bc; pbk = bk;
    pbias = bias; pskip = skip;
    pact = act;
    pnorm = norm;
    pp = p;
    ptrain = train;
    //printf("MLPCell: N = %d, C = %d, K = %d, bf = %d, bias = %d, skip = %d, act = %d, norm = %d, dropout prob = %.2f train = %d\n", N, C, K, bias, skip, act, norm, p, train);
  }

  // Forward pass; inputs: {input_l, input_r, weights_l, weights_r, bias_l, bias_r}.
  std::vector<at::Tensor> fwd(std::vector<at::Tensor> inputs) {
    long bn = pbn; long bc = pbc; long bk = pbk;
    long nn = pN/bn; long nc = pC; long nk = pK;
    long rn = pN % bn;   // remainder rows handled single-threaded
    long in_off = nn*nc*bn*bc;
    long out_off = nn*nk*bn*bk;
    long C = nc*bc;
    long K = nk*bk;
    // std::cout << "BF16--------------> " << std::endl;
    // VNNI packing pairs rows, so odd bc is padded up to even.
    long bcp = (bc % 2 != 0) ? 
(bc + 1): bc; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param cvt_params; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_flags dropout_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK; libxsmm_meltw_binary_param add_params; libxsmm_meltw_binary_flags binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE; libxsmm_meltw_binary_type binary_type = LIBXSMM_MELTW_TYPE_BINARY_ADD; int i=0; at::Tensor t_input_l = inputs[i++]; at::Tensor t_input_r = inputs[i++]; at::Tensor t_weights_l = inputs[i++]; at::Tensor t_weights_r = inputs[i++]; at::Tensor t_bias_l = inputs[i++]; at::Tensor t_bias_r = inputs[i++]; at::Tensor t_output = t_input_l.new_empty({pN, K}); int dd = (bk % 32 == 0) ? bk/32 : bk/32 + 1; at::Tensor t_dropout_mask_bN, t_dropout_mask_rN; if(ptrain && pp > 0) { int size = nn*nk*bn*bk; t_dropout_mask_bN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); if(rn > 0) { size = nk*rn*bk; t_dropout_mask_rN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); } } __mmask32 (*dropout_mask_bN)[nk][bn][dd] = (ptrain && pp > 0) ? (__mmask32 (*)[nk][bn][dd])(t_dropout_mask_bN.data_ptr()) : NULL; __mmask32 (*dropout_mask_rN)[nk][rn][dd] = (ptrain && pp > 0 && rn > 0) ? (__mmask32 (*)[nk][rn][dd])(t_dropout_mask_rN.data_ptr()) : NULL; int rd = (bk % 32 == 0) ? bk/32 : bk/32 + 1; at::Tensor t_relumask_bN, t_relumask_rN; if(pact==1) { int size = nn*nk*bn*bk; t_relumask_bN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); if(rn > 0) { size = nk*rn*bk; t_relumask_rN = at::empty(size, torch::TensorOptions().dtype(torch::kByte)); } } __mmask32 (*relumask_bN)[nk][bn][rd] = pact==1 ? (__mmask32 (*)[nk][bn][rd])(t_relumask_bN.data_ptr()) : NULL; __mmask32 (*relumask_rN)[nk][rn][rd] = (pact==1 && rn > 0) ? 
(__mmask32 (*)[nk][rn][rd])(t_relumask_rN.data_ptr()) : NULL; int threads = 1; #ifdef _OPENMP threads = omp_get_max_threads(); #endif long wts = nk*nc*bk*bcp; long in_bn = threads*nc*bn*bc; long in_rn = nc*rn*bc; long out_bn = threads*nk*bn*bk; long out_rn = nk*rn*bk; long scratch_size; if(pskip) scratch_size = (wts*6 + in_bn*2 + in_rn*2 + out_bn*3 + out_rn*3 + K*2)*sizeof(libxsmm_bfloat16); else scratch_size = (wts*3 + in_bn + in_rn + out_bn + out_rn + K)*sizeof(libxsmm_bfloat16); void *scratch = libxsmm_aligned_malloc(scratch_size, 2097152); libxsmm_bfloat16 *t_bf16_weights_l = (libxsmm_bfloat16*)scratch; libxsmm_bfloat16 *t_tr_weights_l = t_bf16_weights_l + wts; libxsmm_bfloat16 *t_vnni_weights_l = t_tr_weights_l + wts; libxsmm_bfloat16 *t_input_bN_l = t_vnni_weights_l + wts; libxsmm_bfloat16 *t_output_bN_l = t_input_bN_l + in_bn; libxsmm_bfloat16 *t_output_bN = t_output_bN_l + out_bn; libxsmm_bfloat16 *t_bf16_bias_l = t_output_bN + out_bn; libxsmm_bfloat16 *t_input_rN_l=NULL, *t_output_rN_l=NULL, *t_output_rN=NULL; if(rn > 0) { t_input_rN_l = t_bf16_bias_l + K; t_output_rN_l = t_input_rN_l + in_rn; t_output_rN = t_output_rN_l + out_rn; } libxsmm_bfloat16 *t_bf16_weights_r=NULL, *t_tr_weights_r=NULL, *t_vnni_weights_r=NULL, *t_input_bN_r=NULL, *t_output_bN_r=NULL, *t_bf16_bias_r=NULL; libxsmm_bfloat16 *t_input_rN_r=NULL, *t_output_rN_r=NULL; if(pskip) { if(rn > 0) t_bf16_weights_r = t_output_rN + out_rn; else t_bf16_weights_r = t_bf16_bias_l + K; t_tr_weights_r = t_bf16_weights_r + wts; t_vnni_weights_r = t_tr_weights_r + wts; t_input_bN_r = t_vnni_weights_r + wts; t_output_bN_r = t_input_bN_r + in_bn; t_bf16_bias_r = t_output_bN_r + out_bn; if(rn > 0) { t_input_rN_r = t_bf16_bias_r + K; t_output_rN_r = t_input_rN_r + in_rn; } } DECL_VLA_PTR_PT(float, wt_f32_l, [C], t_weights_l); float *bias_f32_l = t_bias_l.data_ptr<float>(); float (*wt_f32_r)[C] = pskip ? (float (*)[C])t_weights_r.data_ptr<float>() : NULL; float *bias_f32_r = pskip ? 
t_bias_r.data_ptr<float>() : NULL; DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, input_l, [C], t_input_l); DECL_VLA_PTR_NPT_CHECK_COND(pskip, libxsmm_bfloat16, at::BFloat16, input_r, [C], t_input_r); DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, output, [K], t_output); DECL_VLA_PTR(libxsmm_bfloat16, wt_l, [nc][bk][bc], t_bf16_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, tr_wt_l, [nc][bcp][bk], t_tr_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, vnni_wt_l, [nc][bcp/2][bk][2], t_vnni_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, input_bN_l, [nc][bn][bc], t_input_bN_l); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, input_rN_l, [nc][rn][bc], t_input_rN_l); DECL_VLA_PTR(libxsmm_bfloat16, output_bN, [nk][bn][bk], t_output_bN); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, output_rN, [nk][rn][bk], t_output_rN); DECL_VLA_PTR(libxsmm_bfloat16, bias_l, [bk], t_bf16_bias_l); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, wt_r, [nc][bk][bc], t_bf16_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, tr_wt_r, [nc][bcp][bk], t_tr_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, vnni_wt_r, [nc][bcp/2][bk][2], t_vnni_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_bN_r, [nc][bn][bc], t_input_bN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_rN_r, [nc][rn][bc], t_input_rN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, output_bN_l, [nk][bn][bk], t_output_bN_l); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, output_bN_r, [nk][bn][bk], t_output_bN_r); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, output_rN_l, [nk][rn][bk], t_output_rN_l); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, output_rN_r, [nk][rn][bk], t_output_rN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, bias_r, [bk], t_bf16_bias_r); // Get BF16 copy of weights for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { cvt_params.in.primary = &wt_f32_l[k*bk][c*bc]; cvt_params.out.primary = &wt_l[k][c]; cvt_f32_bf16(bk, bc, &cvt_params); } } 
if(pskip) { for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { cvt_params.in.primary = &wt_f32_r[k*bk][c*bc]; cvt_params.out.primary = &wt_r[k][c]; cvt_f32_bf16(bk, bc, &cvt_params); } } } for(int k=0; k<nk; k++) { cvt_params.in.primary = bias_f32_l; cvt_params.out.primary = &bias_l[k]; cvt_f32_bf16(nk, bk, &cvt_params); } if(pskip) { for(int k=0; k<nk; k++) { cvt_params.in.primary = bias_f32_r; cvt_params.out.primary = &bias_r[k]; cvt_f32_bf16(nk, bk, &cvt_params); } } // Wt: NORM layout to VNNI norm_to_normT_16b(wt_l[0][0][0], tr_wt_l[0][0][0], bk, bcp); norm_to_vnni_16b(tr_wt_l[0][0][0], vnni_wt_l[0][0][0][0], bcp, bk); if(pskip) { norm_to_normT_16b(wt_r[0][0][0], tr_wt_r[0][0][0], bk, bcp); norm_to_vnni_16b(tr_wt_r[0][0][0], vnni_wt_r[0][0][0][0], bcp, bk); } #ifdef _OPENMP #pragma omp parallel #endif { int tid = omp_get_thread_num(); int threads = omp_get_max_threads(); int jobs = (nn % threads == 0) ? nn/threads : nn/threads + 1; int tb = (tid*jobs < nn) ? tid*jobs : nn; int te = ((tid+1)*jobs < nn) ? 
(tid+1)*jobs : nn; int count = nc; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_binary_param add_params; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_flags dropout_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK; for(int m=tb; m<te; m++) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[m*bn][c*bc]; copy_params.out.primary = &input_bN_l[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } if(pskip) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_r[m*bn][c*bc]; copy_params.out.primary = &input_bN_r[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = &bias_l[k][0]; copy_params.out.primary = &output_bN_l[tid][k]; colbcast_bf16_copy(bn, bk, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = &bias_r[k][0]; copy_params.out.primary = &output_bN_r[tid][k]; colbcast_bf16_copy(bn, bk, &copy_params); } } else { for(int k=0; k<nk; k++) { copy_params.in.primary = &bias_l[k][0]; copy_params.out.primary = &output_bN[tid][k]; colbcast_bf16_copy(bn, bk, &copy_params); } } if(pskip) { brgemm_bf16_bf16(bn, bk, bcp, bn*bk, 0, input_bN_l[tid][0][0], vnni_wt_l[0][0][0][0], output_bN_l[tid][0][0], count); brgemm_bf16_bf16(bn, bk, bcp, bn*bk, 0, input_bN_r[tid][0][0], vnni_wt_r[0][0][0][0], output_bN_r[tid][0][0], count); add_params.in0.primary = (void*)&output_bN_l[tid][0]; add_params.in1.primary = (void*)&output_bN_r[tid][0]; add_params.out.primary = (void*)&output_bN[tid][0]; add_bf16_bf16(bn, bk, &add_params); } else brgemm_bf16_bf16(bn, bk, bcp, bn*bk, 0, input_bN_l[tid][0][0], vnni_wt_l[0][0][0][0], output_bN[tid][0][0], count); if(pact == 1) { for(int k=0; k<nk; k++) { relu_params.in.primary = &output_bN[tid][k]; relu_params.out.primary = &output_bN[tid][k]; relu_params.out.secondary = &relumask_bN[m][k]; relu_fwd_bf16(bn, bk, &relu_params); } } if(ptrain && pp > 0) { for(int k=0; k<nk; k++) { dropout_params.in.primary = 
&output_bN[tid][k]; dropout_params.in.tertiary = &pp; dropout_params.in.secondary = rnd_state; dropout_params.out.primary = &output_bN[tid][k]; dropout_params.out.secondary = &dropout_mask_bN[m][k]; dropout_bf16(bn, bk, &dropout_params, dropout_flags); } } for(int k=0; k<nk; k++) { copy_params.in.primary = &output_bN[tid][k]; copy_params.out.primary = &output[m*bn][k*bk]; bf16_copy(bn, bk, nk*bk, nk*bk, &copy_params); } } } if(rn > 0) { // Single-threaded part of compute // for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[nn*bn][c*bc]; copy_params.out.primary = &input_rN_l[0][c]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } if(pskip) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_r[nn*bn][c*bc]; copy_params.out.primary = &input_rN_r[0][c]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = bias_l; copy_params.out.primary = &output_rN_l[0][k]; colbcast_bf16_copy(rn, bk, &copy_params); copy_params.in.primary = bias_r; copy_params.out.primary = &output_rN_r[0][k]; colbcast_bf16_copy(rn, bk, &copy_params); } } else { for(int k=0; k<nk; k++) { copy_params.in.primary = bias_l; copy_params.out.primary = &output_rN[0][k]; colbcast_bf16_copy(rn, bk, &copy_params); } } int count = nc; if(pskip) { brgemm_bf16_bf16(rn, bk, bcp, rn*bk, 0, input_rN_l[0][0][0], vnni_wt_l[0][0][0][0], output_rN_l[0][0][0], count); brgemm_bf16_bf16(rn, bk, bcp, rn*bk, 0, input_rN_r[0][0][0], vnni_wt_r[0][0][0][0], output_rN_r[0][0][0], count); add_params.in0.primary = (void*)&output_rN_l[0][0]; add_params.in1.primary = (void*)&output_rN_r[0][0]; add_params.out.primary = (void*)&output_rN[0][0]; add_bf16_bf16(rn, bk, &add_params); } else brgemm_bf16_bf16(rn, bk, bcp, rn*bk, 0, input_rN_l[0][0][0], vnni_wt_l[0][0][0][0], output_rN[0][0][0], count); if(pact == 1) { for(int k=0; k<nk; k++) { relu_params.in.primary = &output_rN[0][k]; relu_params.out.primary = &output_rN[0][k]; relu_params.out.secondary = &relumask_rN[0][k]; 
relu_fwd_bf16(rn, bk, &relu_params); } } if(ptrain && pp > 0) { for(int k=0; k<nk; k++) { dropout_params.in.primary = &output_rN[0][k]; dropout_params.in.secondary = rnd_state; dropout_params.in.tertiary = &pp; dropout_params.out.primary = &output_rN[0][k]; dropout_params.out.secondary = &dropout_mask_rN[0][k]; dropout_bf16(rn, bk, &dropout_params, dropout_flags); } } for(int k=0; k<nk; k++) { copy_params.in.primary = &output_rN[0][k]; copy_params.out.primary = &output[nn*bn][k*bk]; bf16_copy(rn, bk, nk*bk, nk*bk, &copy_params); } } libxsmm_free((void*)scratch); return {t_output, t_relumask_bN, t_relumask_rN, t_dropout_mask_bN, t_dropout_mask_rN}; } ////===================================================================================================================================================== //// ====================== BackPass Function =========================== ////===================================================================================================================================================== std::vector<at::Tensor> bwd(std::vector<at::Tensor> inputs) { long bn = pbn; long bc = pbc; long bk = pbk; long nn = pN/bn; long nc = pC; long nk = pK; long rn = pN % bn; long K = nk*bk; long C = nc*bc; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_param delbias_params; libxsmm_meltw_unary_param cvt_params; libxsmm_meltw_unary_flags dropout_flags = LIBXSMM_MELTW_FLAG_UNARY_BITMASK; int threads = 1; #ifdef _OPENMP threads = omp_get_max_threads(); #endif // ---------------- zero Padding to handle brgemm reduction ------------- long bnp = (bn % 2 != 0) ? (bn + 1): bn; long rnp = (rn % 2 != 0) ? (rn + 1): rn; long bkp = (bk % 2 != 0) ? 
(bk + 1): bk; // ---------------------------------------------------------------------- int i=0; at::Tensor t_grad_output = inputs[i++]; at::Tensor t_input_l = inputs[i++]; at::Tensor t_input_r = inputs[i++]; at::Tensor t_weights_l = inputs[i++]; at::Tensor t_weights_r = inputs[i++]; at::Tensor t_relumask_bN = inputs[i++]; at::Tensor t_relumask_rN = inputs[i++]; at::Tensor t_dropout_mask_bN = inputs[i++]; at::Tensor t_dropout_mask_rN = inputs[i++]; at::Tensor t_grad_weights_l = t_weights_l.new_empty({nk, nc, bk, bc}); at::Tensor t_grad_bias_l = t_weights_l.new_empty(K); at::Tensor t_grad_input_l = t_input_l.new_empty({pN, C}); at::Tensor t_grad_weights_r, t_grad_bias_r, t_grad_input_r; if(pskip) { t_grad_weights_r = t_weights_r.new_empty({nk, nc, bk, bc}); t_grad_bias_r = t_weights_r.new_empty(K); t_grad_input_r = t_input_r.new_empty({pN, C}); } long wts = nk*nc*bkp*bc; long go_bn_k = threads*nk*bn*bkp; long go_rn_k = nk*rn*bkp; long go_bn_n = threads*nk*bnp*bk; long go_rn_n = nk*rnp*bk; long gi_bn = threads*nc*bn*bc; long gi_rn = nc*rn*bc; long tr_go_bn = threads*nk*bnp*bk; long tr_go_rn = nk*rnp*bk; long in_v_bn = threads*nc*bnp*bc; long in_v_rn = nc*rnp*bc; long in_bn = threads*nc*bn*bc; long in_rn = nc*rn*bc; long scratch_size; if(pskip) scratch_size = (wts*4 + go_bn_k + go_rn_k + go_bn_n + go_rn_n + gi_bn*2 + gi_rn*2 + tr_go_bn + tr_go_rn + in_v_bn*2 + in_v_rn*2 + in_bn*2 + in_rn*2)*sizeof(libxsmm_bfloat16) + (nk*nc*bk*bc*2)*sizeof(float); else scratch_size = (wts*2 + go_bn_k + go_rn_k + go_bn_n + go_rn_n + gi_bn + gi_rn + tr_go_bn + tr_go_rn + in_v_bn + in_v_rn + in_bn + in_rn)*sizeof(libxsmm_bfloat16) + (nk*nc*bk*bc)*sizeof(float); void *scratch = libxsmm_aligned_malloc(scratch_size, 2097152); libxsmm_bfloat16* t_grad_output_bN_K = (libxsmm_bfloat16*)scratch; libxsmm_bfloat16* t_grad_output_bN_N = t_grad_output_bN_K + go_bn_k; libxsmm_bfloat16* t_tr_grad_output_bN = t_grad_output_bN_N + go_bn_n; libxsmm_bfloat16* t_input_vnni_bN_l = t_tr_grad_output_bN + 
tr_go_bn; libxsmm_bfloat16* t_grad_input_bN_l = t_input_vnni_bN_l + in_v_bn; libxsmm_bfloat16* t_input_bN_l = t_grad_input_bN_l + gi_bn; libxsmm_bfloat16* t_vnni_weights_l = t_input_bN_l + in_bn; libxsmm_bfloat16* t_bf16_weights_l = t_vnni_weights_l + wts; float* t_f32_grad_wt_l = (float*)(t_bf16_weights_l + wts); libxsmm_bfloat16 *t_grad_output_rN_K=NULL, *t_grad_output_rN_N=NULL, *t_tr_grad_output_rN=NULL, *t_input_vnni_rN_l=NULL, *t_grad_input_rN_l=NULL; libxsmm_bfloat16 *t_input_rN_l=NULL; if(rn > 0) { t_grad_output_rN_K = (libxsmm_bfloat16*)(t_f32_grad_wt_l + wts); t_grad_output_rN_N = t_grad_output_rN_K + go_rn_k; t_tr_grad_output_rN = t_grad_output_rN_N + go_rn_n; t_input_vnni_rN_l = t_tr_grad_output_rN + tr_go_rn; t_grad_input_rN_l = t_input_vnni_rN_l + in_v_rn; t_input_rN_l = t_grad_input_rN_l + gi_rn; } libxsmm_bfloat16* t_input_vnni_bN_r=NULL, *t_grad_input_bN_r=NULL, *t_input_bN_r=NULL; libxsmm_bfloat16* t_vnni_weights_r=NULL, *t_bf16_weights_r=NULL, *t_input_vnni_rN_r=NULL, *t_grad_input_rN_r=NULL, *t_input_rN_r=NULL; float *t_f32_grad_wt_r=NULL; if(pskip) { if(rn > 0) t_input_vnni_bN_r = t_input_rN_l + in_rn; else t_input_vnni_bN_r = (libxsmm_bfloat16*)(t_f32_grad_wt_l + wts); t_grad_input_bN_r = t_input_vnni_bN_r + in_v_bn; t_input_bN_r = t_grad_input_bN_r + gi_bn; t_vnni_weights_r = t_input_bN_r + in_bn; t_bf16_weights_r = t_vnni_weights_r + wts; t_f32_grad_wt_r = (float*)(t_bf16_weights_r + wts); if(rn > 0) { t_input_vnni_rN_r = (libxsmm_bfloat16*)(t_f32_grad_wt_r + wts); t_grad_input_rN_r = t_input_vnni_rN_r + in_v_rn; t_input_rN_r = t_grad_input_rN_r + gi_rn; } } DECL_VLA_PTR_PT(float, wt_f32_l, [C], t_weights_l); DECL_VLA_PTR_PT(float, grad_wt_l, [C], t_grad_weights_l); float (*wt_f32_r)[C] = pskip ? (float (*)[C])t_weights_r.data_ptr<float>() : NULL; float (*grad_wt_r)[C] = pskip ? 
(float (*)[C])t_grad_weights_r.data_ptr<float>() : NULL; DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, grad_output, [K], t_grad_output); DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, input_l, [C], t_input_l); DECL_VLA_PTR_NPT(libxsmm_bfloat16, at::BFloat16, grad_input_l, [C], t_grad_input_l); DECL_VLA_PTR_NPT_CHECK_COND(pskip, libxsmm_bfloat16, at::BFloat16, input_r, [C], t_input_r); DECL_VLA_PTR_NPT_CHECK_COND(pskip, libxsmm_bfloat16, at::BFloat16, grad_input_r, [C], t_grad_input_r); DECL_VLA_PTR(libxsmm_bfloat16, grad_output_bN_K, [nk][bn][bkp], t_grad_output_bN_K); DECL_VLA_PTR(libxsmm_bfloat16, grad_output_bN_N, [nk][bnp][bk], t_grad_output_bN_N); DECL_VLA_PTR(libxsmm_bfloat16, tr_grad_output_bN, [nk][bk][bnp], t_tr_grad_output_bN); DECL_VLA_PTR(libxsmm_bfloat16, input_vnni_bN_l, [nc][bnp/2][bc][2], t_input_vnni_bN_l); DECL_VLA_PTR(libxsmm_bfloat16, grad_input_bN_l, [nc][bn][bc], t_grad_input_bN_l); DECL_VLA_PTR(libxsmm_bfloat16, input_bN_l, [nc][bn][bc], t_input_bN_l); DECL_VLA_PTR(libxsmm_bfloat16, vnni_wt_l, [nc][bkp/2][bc][2], t_vnni_weights_l); DECL_VLA_PTR(libxsmm_bfloat16, wt_l, [nc][bk][bc], t_bf16_weights_l); DECL_VLA_PTR(float, grad_wt_f32_l, [nc][bk][bc], t_f32_grad_wt_l); float *grad_bias_l = t_grad_bias_l.data_ptr<float>(); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_vnni_bN_r, [nc][bnp/2][bc][2], t_input_vnni_bN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, grad_input_bN_r, [nc][bn][bc], t_grad_input_bN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, input_bN_r, [nc][bn][bc], t_input_bN_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, vnni_wt_r, [nc][bkp/2][bc][2], t_vnni_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, libxsmm_bfloat16, wt_r, [nc][bk][bc], t_bf16_weights_r); DECL_VLA_PTR_CHECK_COND(pskip, float, grad_wt_f32_r, [nc][bk][bc], t_f32_grad_wt_r); float *grad_bias_r = pskip ? 
t_grad_bias_r.data_ptr<float>() : NULL; DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, grad_output_rN_K, [nk][rn][bkp], t_grad_output_rN_K); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, grad_output_rN_N, [nk][rnp][bk], t_grad_output_rN_N); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, tr_grad_output_rN, [nk][bk][rnp], t_tr_grad_output_rN); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, input_vnni_rN_l, [nc][rnp/2][bc][2], t_input_vnni_rN_l); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, grad_input_rN_l, [nc][rn][bc], t_grad_input_rN_l); DECL_VLA_PTR_CHECK_VAR(rn, libxsmm_bfloat16, input_rN_l, [nc][rn][bc], t_input_rN_l); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, input_vnni_rN_r, [nc][rnp/2][bc][2], t_input_vnni_rN_r); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, grad_input_rN_r, [nc][rn][bc], t_grad_input_rN_r); DECL_VLA_PTR_CHECK_COND_VAR(pskip, rn, libxsmm_bfloat16, input_rN_r, [nc][rn][bc], t_input_rN_r); int dd = (bk % 32 == 0) ? bk/32 : bk/32 + 1; int rd = (bk % 32 == 0) ? bk/32 : bk/32 + 1; __mmask32 (*dropout_mask_bN)[nk][bn][dd] = (ptrain && pp > 0) ? (__mmask32 (*)[nk][bn][dd])(t_dropout_mask_bN.data_ptr()) : NULL; __mmask32 (*dropout_mask_rN)[nk][rn][dd] = (ptrain && pp > 0 && rn > 0) ? (__mmask32 (*)[nk][rn][dd])(t_dropout_mask_rN.data_ptr()) : NULL; __mmask32 (*relumask_bN)[nk][bn][rd] = pact==1 ? (__mmask32 (*)[nk][bn][rd])(t_relumask_bN.data_ptr()) : NULL; __mmask32 (*relumask_rN)[nk][rn][rd] = (pact==1 && rn > 0) ? 
(__mmask32 (*)[nk][rn][rd])(t_relumask_rN.data_ptr()) : NULL; copy_params.out.primary = t_f32_grad_wt_l; zero(K*C, &copy_params); copy_params.out.primary = t_grad_weights_l.data_ptr<float>(); zero(K*C, &copy_params); copy_params.out.primary = t_grad_bias_l.data_ptr<float>(); zero(K, &copy_params); if(pskip) { copy_params.out.primary = t_f32_grad_wt_r; zero(K*C, &copy_params); } if(pskip) { copy_params.out.primary = t_grad_weights_r.data_ptr<float>(); zero(K*C, &copy_params); copy_params.out.primary = t_grad_bias_r.data_ptr<float>(); zero(K, &copy_params); } // Get BF16 copy of weights for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { cvt_params.in.primary = &wt_f32_l[k*bk][c*bc]; cvt_params.out.primary = &wt_l[k][c]; cvt_f32_bf16(bk, bc, &cvt_params); } } int count = nk; norm_to_vnni_16b(wt_l[0][0][0], vnni_wt_l[0][0][0][0], bkp, bc); //bk x bc --> bkp/2 x bc x 2 if(pskip) { for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { cvt_params.in.primary = &wt_f32_r[k*bk][c*bc]; cvt_params.out.primary = &wt_r[k][c]; cvt_f32_bf16(bk, bc, &cvt_params); } } int count = nk; norm_to_vnni_16b(wt_r[0][0][0], vnni_wt_r[0][0][0][0], bkp, bc); //bk x bc --> bkp/2 x bc x 2 } if(pskip) { #ifdef _OPENMP #pragma omp parallel reduction(+: grad_wt_f32_l[:nk][:nc][:bk][:bc], grad_bias_l[:K], grad_wt_f32_r[:nk][:nc][:bk][:bc], grad_bias_r[:K]) #endif { int tid = omp_get_thread_num(); int threads = omp_get_max_threads(); int jobs = (nn % threads == 0) ? nn/threads : nn/threads + 1; int tb = (tid*jobs < nn) ? tid*jobs : nn; int te = ((tid+1)*jobs < nn) ? 
(tid+1)*jobs : nn; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param delbias_params; for(int m=tb; m<te; m++) { for(int k=0; k<nk; k++) { if(ptrain && pp > 0) { dropout_params.in.primary = &grad_output[m*bn][k*bk]; dropout_params.in.secondary = &dropout_mask_bN[m][k][0][0]; dropout_params.in.tertiary = &pp; dropout_params.out.primary = &grad_output[m*bn][k*bk]; dropout_bwd_bf16(bn, bk, &dropout_params, dropout_flags); } if(pact == 1) { relu_params.in.primary = &grad_output[m*bn][k*bk]; relu_params.in.secondary = &relumask_bN[m][k][0][0]; relu_params.out.primary = &grad_output[m*bn][k*bk]; relu_bwd_bf16(bn, bk, &relu_params); } copy_params.in.primary = &grad_output[m*bn][k*bk]; copy_params.out.primary = &grad_output_bN_K[tid][k]; bf16_copy(bn, bk, nk*bkp, nk*bk, &copy_params); } brgemm_bf16_bf16(bn, bc, bkp, bn*bkp, 0, grad_output_bN_K[tid][0][0], vnni_wt_l[0][0][0][0], grad_input_bN_l[tid][0][0], count, 0.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_input_bN_l[tid][c]; copy_params.out.primary = &grad_input_l[m*bn][c*bc]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } brgemm_bf16_bf16(bn, bc, bkp, bn*bkp, 0, grad_output_bN_K[tid][0][0], vnni_wt_r[0][0][0][0], grad_input_bN_r[tid][0][0], count, 0.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_input_bN_r[tid][c]; copy_params.out.primary = &grad_input_r[m*bn][c*bc]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = &grad_output[m*bn][k*bk]; copy_params.out.primary = &grad_output_bN_N[tid][k]; bf16_copy(bnp, bk, nk*bk, nk*bk, &copy_params); norm_to_normT_16b(grad_output_bN_N[tid][k][0], tr_grad_output_bN[tid][k][0], bnp, bk); } for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[m*bn][c*bc]; copy_params.out.primary = &input_bN_l[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); norm_to_vnni_16b(input_bN_l[tid][c][0], 
input_vnni_bN_l[tid][c][0][0], bnp, bc); } count = 1; brgemm_bf16_f32(bk, bc, bnp, bnp*bk, bnp*bc, tr_grad_output_bN[tid][0][0], input_vnni_bN_l[tid][0][0][0], grad_wt_f32_l[0][0][0], count, 1.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &input_r[m*bn][c*bc]; copy_params.out.primary = &input_bN_r[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); norm_to_vnni_16b(input_bN_r[tid][c][0], input_vnni_bN_r[tid][c][0][0], bnp, bc); } count = 1; brgemm_bf16_f32(bk, bc, bnp, bnp*bk, bnp*bc, tr_grad_output_bN[tid][0][0], input_vnni_bN_r[tid][0][0][0], grad_wt_f32_r[0][0][0], count, 1.0); for(int k=0; k<nk; k++) { delbias_params.in.primary = &grad_output_bN_N[tid][k]; delbias_params.out.primary = grad_bias_l; delbias_bf16_f32(bn, bk, bn, bk, &delbias_params); } copy_params.in.primary = grad_bias_l; copy_params.out.primary = grad_bias_r; f32_copy(1, K, K, K, &copy_params); } } } else { #ifdef _OPENMP #pragma omp parallel reduction(+: grad_wt_f32_l[:nk][:nc][:bk][:bc], grad_bias_l[:K]) #endif { int tid = omp_get_thread_num(); int threads = omp_get_max_threads(); int jobs = (nn % threads == 0) ? nn/threads : nn/threads + 1; int tb = (tid*jobs < nn) ? tid*jobs : nn; int te = ((tid+1)*jobs < nn) ? 
(tid+1)*jobs : nn; libxsmm_meltw_unary_param relu_params; libxsmm_meltw_unary_param dropout_params; libxsmm_meltw_unary_param copy_params; libxsmm_meltw_unary_param delbias_params; for(int m=tb; m<te; m++) { for(int k=0; k<nk; k++) { if(ptrain && pp > 0) { dropout_params.in.primary = &grad_output[m*bn][k*bk]; dropout_params.in.secondary = &dropout_mask_bN[m][k][0][0]; dropout_params.in.tertiary = &pp; dropout_params.out.primary = &grad_output[m*bn][k*bk]; dropout_bwd_bf16(bn, bk, &dropout_params, dropout_flags); } if(pact == 1) { relu_params.in.primary = &grad_output[m*bn][k*bk]; relu_params.in.secondary = &relumask_bN[m][k][0][0]; relu_params.out.primary = &grad_output[m*bn][k*bk]; relu_bwd_bf16(bn, bk, &relu_params); } copy_params.in.primary = &grad_output[m*bn][k*bk]; copy_params.out.primary = &grad_output_bN_K[tid][k]; bf16_copy(bn, bk, nk*bkp, nk*bk, &copy_params); } brgemm_bf16_bf16(bn, bc, bkp, bn*bkp, 0, grad_output_bN_K[tid][0][0], vnni_wt_l[0][0][0][0], grad_input_bN_l[tid][0][0], count, 0.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_input_bN_l[tid][c]; copy_params.out.primary = &grad_input_l[m*bn][c*bc]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); } for(int k=0; k<nk; k++) { copy_params.in.primary = &grad_output[m*bn][k*bk]; copy_params.out.primary = &grad_output_bN_N[tid][k]; bf16_copy(bnp, bk, nk*bk, nk*bk, &copy_params); norm_to_normT_16b(grad_output_bN_N[tid][k][0], tr_grad_output_bN[tid][k][0], bnp, bk); } for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[m*bn][c*bc]; copy_params.out.primary = &input_bN_l[tid][c]; bf16_copy(bn, bc, nc*bc, nc*bc, &copy_params); norm_to_vnni_16b(input_bN_l[tid][c][0], input_vnni_bN_l[tid][c][0][0], bnp, bc); } count = 1; brgemm_bf16_f32(bk, bc, bnp, bnp*bk, bnp*bc, tr_grad_output_bN[tid][0][0], input_vnni_bN_l[tid][0][0][0], grad_wt_f32_l[0][0][0], count, 1.0); for(int k=0; k<nk; k++) { delbias_params.in.primary = &grad_output_bN_N[tid][k]; delbias_params.out.primary = grad_bias_l; 
delbias_bf16_f32(bn, bk, bn, bk, &delbias_params); } } } } if(rn > 0) { //Single-thread portion of code-------------------------- // Dropout if(ptrain && pp > 0) { for(int k=0; k<nk; k++) { dropout_params.in.primary = &grad_output[nn*bn][k*bk]; dropout_params.in.secondary = &dropout_mask_rN[0][k][0][0]; dropout_params.in.tertiary = &pp; dropout_params.out.primary = &grad_output[nn*bn][k*bk]; dropout_bwd_bf16(rn, bk, &dropout_params, dropout_flags); } } // ReLU if(pact == 1) { for(int k=0; k<nk; k++) { relu_params.in.primary = &grad_output[nn*bn][k*bk]; relu_params.in.secondary = &relumask_rN[0][k][0][0]; relu_params.out.primary = &grad_output[nn*bn][k*bk]; relu_bwd_bf16(rn, bk, &relu_params); } } //grad-input for(int k=0; k<nk; k++) { copy_params.in.primary = &grad_output[nn*bn][k*bk]; copy_params.out.primary = &grad_output_rN_K[0][k]; bf16_copy(rn, bk, nk*bkp, nk*bk, &copy_params); } brgemm_bf16_bf16(rn, bc, bkp, rn*bkp, 0, grad_output_rN_K[0][0][0], vnni_wt_l[0][0][0][0], grad_input_rN_l[0][0][0], count, 0.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_input_rN_l[0][c]; copy_params.out.primary = &grad_input_l[nn*bn][c*bc]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } if(pskip) { brgemm_bf16_bf16(rn, bc, bkp, rn*bkp, 0, grad_output_rN_K[0][0][0], vnni_wt_r[0][0][0][0], grad_input_rN_r[0][0][0], count, 0.0); for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_input_rN_r[0][c]; copy_params.out.primary = &grad_input_r[nn*bn][c*bc]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } } //grad-weights for(int k=0; k<nk; k++) { copy_params.in.primary = &grad_output[nn*bn][k*bk]; copy_params.out.primary = &grad_output_rN_N[0][k]; bf16_copy(rn, bk, nk*bk, nk*bk, &copy_params); norm_to_normT_16b(grad_output_rN_N[0][k][0], tr_grad_output_rN[0][k][0], rnp, bk); } for(int c=0; c<nc; c++) { copy_params.in.primary = &input_l[nn*bn][c*bc]; copy_params.out.primary = &input_rN_l[0][c]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } for(int c=0; c<nc; c++) 
norm_to_vnni_16b(input_rN_l[0][c][0], input_vnni_rN_l[0][c][0][0], rnp, bc); count = 1; brgemm_bf16_f32(bk, bc, rnp, rnp*bk, rnp*bc, tr_grad_output_rN[0][0][0], input_vnni_rN_l[0][0][0][0], grad_wt_f32_l[0][0][0], count, 1.0); if(pskip) { for(int c=0; c<nc; c++) { copy_params.in.primary = &input_r[nn*bn][c*bc]; copy_params.out.primary = &input_rN_r[0][c]; bf16_copy(rn, bc, nc*bc, nc*bc, &copy_params); } for(int c=0; c<nc; c++) norm_to_vnni_16b(input_rN_r[0][c][0], input_vnni_rN_r[0][c][0][0], rnp, bc); count = 1; brgemm_bf16_f32(bk, bc, rnp, rnp*bk, rnp*bc, tr_grad_output_rN[0][0][0], input_vnni_rN_r[0][0][0][0], grad_wt_f32_r[0][0][0], count, 1.0); } for(int k=0; k<nk; k++) { delbias_params.in.primary = &grad_output_rN_N[0][k]; delbias_params.out.primary = grad_bias_l; delbias_bf16_f32(rn, bk, rn, bk, &delbias_params); } if(pskip) { for(int k=0; k<nk; k++) { delbias_params.in.primary = &grad_output_rN_N[0][k]; delbias_params.out.primary = grad_bias_r; delbias_bf16_f32(rn, bk, rn, bk, &delbias_params); } } } for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_wt_f32_l[k][c]; copy_params.out.primary = &grad_wt_l[k*bk][c*bc]; f32_copy(bk, bc, nc*bc, nc*bc, &copy_params); } } if(pskip) { for(int k=0; k<nk; k++) { for(int c=0; c<nc; c++) { copy_params.in.primary = &grad_wt_f32_r[k][c]; copy_params.out.primary = &grad_wt_r[k*bk][c*bc]; f32_copy(bk, bc, nc*bc, nc*bc, &copy_params); } } } libxsmm_free(scratch); return {t_grad_input_l, t_grad_input_r, t_grad_weights_l, t_grad_weights_r, t_grad_bias_l, t_grad_bias_r}; } bool has_bias() {return pbias;} bool has_skip() {return pskip;} bool has_norm() {return pnorm;} private: long pN; long pC; long pK; long pbn; long pbc; long pbk; bool pbias; bool pskip; int pact; bool pnorm; float pp; bool ptrain; }; #endif
/* ==================== feature.c (ImageMagick MagickCore) ==================== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/animate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/feature.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/image-private.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/matrix.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/morphology-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/timer.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a n n y E d g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of % edges in images. 
%
%  The format of the CannyEdgeImage method is:
%
%      Image *CannyEdgeImage(const Image *image,const double radius,
%        const double sigma,const double lower_percent,
%        const double upper_percent,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the gaussian smoothing filter.
%
%    o sigma: the sigma of the gaussian smoothing filter.
%
%    o lower_percent: percentage of edge pixels in the lower threshold.
%
%    o upper_percent: percentage of edge pixels in the upper threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Per-pixel gradient state kept in the canny_cache matrix: gradient magnitude,
  the post-suppression intensity, the quantized gradient orientation (0..3),
  and (x,y) coordinates (the coordinate fields are only used while an element
  doubles as a work-stack entry in TraceEdges).
*/
typedef struct _CannyInfo
{
  double
    magnitude,
    intensity;

  int
    orientation;

  ssize_t
    x,
    y;
} CannyInfo;

/*
  Return MagickTrue if (x,y) lies inside the image bounds.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x < 0) || (x >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y < 0) || (y >= (ssize_t) image->rows))
    return(MagickFalse);
  return(MagickTrue);
}

/*
  Hysteresis edge tracing: starting from the strong-edge seed at (x,y), mark
  the seed white and then flood-fill to any 8-connected neighbor whose cached
  gradient intensity is at or above lower_threshold and that has not yet been
  marked (pixel intensity still 0.0).  Column 0 of canny_cache is reused as an
  explicit stack of CannyInfo entries (i is the stack pointer), so no
  recursion and no extra allocation are needed.  Returns MagickFalse on any
  cache or matrix access failure.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  register PixelPacket
    *q;

  register ssize_t
    i;

  /*
    Mark the seed pixel as an edge (white).
  */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  q->red=QuantumRange;
  q->green=QuantumRange;
  q->blue=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Push the seed coordinates onto the stack stored in matrix column 0.
  */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    /*
      Pop the next edge pixel and examine its 8-neighborhood.
    */
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            /*
              Weak edge connected to a strong edge: mark it and push it.
            */
            q->red=QuantumRange;
            q->green=QuantumRange;
            q->blue=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}

MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag  "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MaxTextExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise: two separable gaussian blur passes (0 and 90 degrees),
    then reduce to grayscale for the gradient computation.
  */
  (void) FormatLocaleString(geometry,MaxTextExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,DeactivateAlphaChannel);
  /*
    Find the intensity gradient of the image.  One CannyInfo element is cached
    per pixel (possibly disk-backed for large images).
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Request a 2-row window (width columns+1) so each pixel sees its 2x2
      neighborhood; virtual pixels cover the right/bottom borders.
    */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      register const PixelPacket
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /*
        2x2 horizontal/vertical differencing kernels (Roberts-style).
      */
      static double
        Gx[2][2] =
        {
          { -1.0,  +1.0 },
          { -1.0,  +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0,  +1.0 },
          { -1.0,  -1.0 }
        };

      (void) memset(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      pixel.orientation=0;
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          /*
            Quantize gradient direction into 4 bins; the thresholds are
            tan(22.5 deg) ~= 0.4142 and tan(67.5 deg) ~= 2.4142.
          */
          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p++;
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge: keep a pixel's magnitude as its intensity only if it is a
    local maximum along its gradient direction; otherwise zero it.  min/max
    intensity are tracked (under omp critical) to scale the thresholds.
  */
  progress=0;
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      /*
        Start from an all-black result; TraceEdges paints edges white later.
      */
      q->red=0;
      q->green=0;
      q->blue=0;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,CannyEdgeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold: the percent arguments are scaled into the
    observed [min,max] intensity range.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold (sequential: TraceEdges shares the matrix stack).
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      register const PixelPacket
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const PixelPacket *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l F e a t u r e s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelFeatures() returns features for each channel in the image in
%  each of four directions (horizontal, vertical, left and right diagonals)
%  for the specified distance.  The features include the angular second
%  moment, contrast, correlation, sum of squares: variance, inverse difference
%  moment, sum average, sum variance, sum entropy, entropy, difference
%  variance, difference entropy, information measures of correlation 1,
%  information measures of correlation 2, and maximum correlation coefficient.
%  You can access the red channel contrast, for example, like this:
%
%      channel_features=GetImageChannelFeatures(image,1,exception);
%      contrast=channel_features[RedChannel].contrast[0];
%
%  Use MagickRelinquishMemory() to free the features buffer.
%
%  The format of the GetImageChannelFeatures method is:
%
%      ChannelFeatures *GetImageChannelFeatures(const Image *image,
%        const size_t distance,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o distance: the distance.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  log10(|x|) with |x| clamped to Log10Epsilon so a zero (or near-zero)
  co-occurrence probability never yields -infinity in the entropy sums.
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon  (1.0e-11)

  if (fabs(x) < Log10Epsilon)
    return(log10(Log10Epsilon));
  return(log10(fabs(x)));
}

MagickExport ChannelFeatures *GetImageChannelFeatures(const Image *image,
  const size_t distance,ExceptionInfo *exception)
{
  typedef struct _ChannelStatistics
  {
    DoublePixelPacket
      direction[4];  /* horizontal, vertical, left and right diagonals */
  } ChannelStatistics;

  CacheView
    *image_view;

  ChannelFeatures
    *channel_features;

  ChannelStatistics
    **cooccurrence,
    correlation,
    *density_x,
    *density_xy,
    *density_y,
    entropy_x,
    entropy_xy,
    entropy_xy1,
    entropy_xy2,
    entropy_y,
    mean,
    **Q,
    *sum,
    sum_squares,
    variance;

  LongPixelPacket
    gray,
    *grays;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    length;

  ssize_t
    y;

  unsigned int
    number_grays;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The co-occurrence offsets reach `distance' pixels away; the image must be
    at least distance+1 in each dimension.
  */
  if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
    return((ChannelFeatures *) NULL);
  /*
    One ChannelFeatures slot per channel (indexed by ChannelType).
  */
  length=CompositeChannels+1UL;
  channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
    sizeof(*channel_features));
  if (channel_features == (ChannelFeatures *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_features,0,length*
    sizeof(*channel_features));
  /*
    Form grays.
*/ grays=(LongPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (LongPixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].opacity=(~0U); grays[i].index=(~0U); } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(GetPixelRed(p))].red= ScaleQuantumToMap(GetPixelRed(p)); grays[ScaleQuantumToMap(GetPixelGreen(p))].green= ScaleQuantumToMap(GetPixelGreen(p)); grays[ScaleQuantumToMap(GetPixelBlue(p))].blue= ScaleQuantumToMap(GetPixelBlue(p)); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index= ScaleQuantumToMap(GetPixelIndex(indexes+x)); if (image->matte != MagickFalse) grays[ScaleQuantumToMap(GetPixelOpacity(p))].opacity= ScaleQuantumToMap(GetPixelOpacity(p)); p++; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); return(channel_features); } (void) memset(&gray,0,sizeof(gray)); for (i=0; i <= (ssize_t) 
MaxMap; i++) { if (grays[i].red != ~0U) grays[(ssize_t) gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[(ssize_t) gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[(ssize_t) gray.blue++].blue=grays[i].blue; if (image->colorspace == CMYKColorspace) if (grays[i].index != ~0U) grays[(ssize_t) gray.index++].index=grays[i].index; if (image->matte != MagickFalse) if (grays[i].opacity != ~0U) grays[(ssize_t) gray.opacity++].opacity=grays[i].opacity; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->colorspace == CMYKColorspace) if (gray.index > number_grays) number_grays=gray.index; if (image->matte != MagickFalse) if (gray.opacity > number_grays) number_grays=gray.opacity; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) 
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != (ChannelStatistics *) NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) memset(&correlation,0,sizeof(correlation)); (void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) memset(&mean,0,sizeof(mean)); (void) memset(sum,0,number_grays*sizeof(*sum)); (void) memset(&sum_squares,0,sizeof(sum_squares)); (void) memset(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) memset(&entropy_x,0,sizeof(entropy_x)); (void) memset(&entropy_xy,0,sizeof(entropy_xy)); (void) memset(&entropy_xy1,0,sizeof(entropy_xy1)); (void) memset(&entropy_xy2,0,sizeof(entropy_xy2)); (void) memset(&entropy_y,0,sizeof(entropy_y)); (void) memset(&variance,0,sizeof(variance)); for (i=0; i < (ssize_t) number_grays; i++) { cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays, sizeof(**cooccurrence)); Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q)); if ((cooccurrence[i] == (ChannelStatistics *) NULL) || (Q[i] == (ChannelStatistics *) NULL)) break; (void) memset(cooccurrence[i],0,number_grays* sizeof(**cooccurrence)); (void) 
memset(Q[i],0,number_grays*sizeof(**Q)); } if (i < (ssize_t) number_grays) { for (i--; i >= 0; i--) { if (Q[i] != (ChannelStatistics *) NULL) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); if (cooccurrence[i] != (ChannelStatistics *) NULL) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); } Q=(ChannelStatistics **) RelinquishMagickMemory(Q); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); sum=(ChannelStatistics *) RelinquishMagickMemory(sum); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Initialize spatial dependence matrix. */ status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; ssize_t i, offset, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,y,image->columns+ 2*distance,distance+2,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); p+=distance; indexes+=distance; for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < 4; i++) { switch (i) { case 0: default: { /* Horizontal adjacency. */ offset=(ssize_t) distance; break; } case 1: { /* Vertical adjacency. */ offset=(ssize_t) (image->columns+2*distance); break; } case 2: { /* Right diagonal adjacency. 
*/ offset=(ssize_t) ((image->columns+2*distance)-distance); break; } case 3: { /* Left diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(GetPixelRed(p))) u++; while (grays[v].red != ScaleQuantumToMap(GetPixelRed(p+offset))) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(p))) u++; while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(p+offset))) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(p))) u++; while (grays[v].blue != ScaleQuantumToMap((p+offset)->blue)) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].index != ScaleQuantumToMap(GetPixelIndex(indexes+x))) u++; while (grays[v].index != ScaleQuantumToMap(GetPixelIndex(indexes+x+offset))) v++; cooccurrence[u][v].direction[i].index++; cooccurrence[v][u].direction[i].index++; } if (image->matte != MagickFalse) { u=0; v=0; while (grays[u].opacity != ScaleQuantumToMap(GetPixelOpacity(p))) u++; while (grays[v].opacity != ScaleQuantumToMap((p+offset)->opacity)) v++; cooccurrence[u][v].direction[i].opacity++; cooccurrence[v][u].direction[i].opacity++; } } p++; } } grays=(LongPixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* 
Normalize spatial dependence matrix. */ for (i=0; i < 4; i++) { double normalize; register ssize_t y; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } normalize=PerceptibleReciprocal(normalize); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red*=normalize; cooccurrence[x][y].direction[i].green*=normalize; cooccurrence[x][y].direction[i].blue*=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].index*=normalize; if (image->matte != MagickFalse) cooccurrence[x][y].direction[i].opacity*=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
*/ channel_features[RedChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BlueChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].index* cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].opacity* cooccurrence[x][y].direction[i].opacity; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) sum[y].direction[i].index+=cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) sum[y].direction[i].opacity+=cooccurrence[x][y].direction[i].opacity; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) correlation.direction[i].index+=x*y* cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) correlation.direction[i].opacity+=x*y* cooccurrence[x][y].direction[i].opacity; /* Inverse Difference Moment. 
*/ channel_features[RedChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BlueChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].index/((y-x)*(y-x)+1); if (image->matte != MagickFalse) channel_features[OpacityChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].opacity/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_xy[y+x+2].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; /* Entropy. */ channel_features[RedChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); channel_features[GreenChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); channel_features[BlueChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].entropy[i]-= cooccurrence[x][y].direction[i].index* MagickLog10(cooccurrence[x][y].direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].entropy[i]-= cooccurrence[x][y].direction[i].opacity* MagickLog10(cooccurrence[x][y].direction[i].opacity); /* Information Measures of Correlation. 
*/ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_x[x].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_y[y].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->colorspace == CMYKColorspace) { mean.direction[i].index+=y*sum[y].direction[i].index; sum_squares.direction[i].index+=y*y*sum[y].direction[i].index; } if (image->matte != MagickFalse) { mean.direction[i].opacity+=y*sum[y].direction[i].opacity; sum_squares.direction[i].opacity+=y*y*sum[y].direction[i].opacity; } } /* Correlation: measure of linear-dependencies in the image. 
*/ channel_features[RedChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BlueChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].correlation[i]= (correlation.direction[i].index-mean.direction[i].index* mean.direction[i].index)/(sqrt(sum_squares.direction[i].index- (mean.direction[i].index*mean.direction[i].index))*sqrt( sum_squares.direction[i].index-(mean.direction[i].index* mean.direction[i].index))); if (image->matte != MagickFalse) channel_features[OpacityChannel].correlation[i]= (correlation.direction[i].opacity-mean.direction[i].opacity* mean.direction[i].opacity)/(sqrt(sum_squares.direction[i].opacity- (mean.direction[i].opacity*mean.direction[i].opacity))*sqrt( sum_squares.direction[i].opacity-(mean.direction[i].opacity* mean.direction[i].opacity))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. 
*/ channel_features[RedChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BlueChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_average[i]+= x*density_xy[x].direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_average[i]+= x*density_xy[x].direction[i].opacity; /* Sum entropy. */ channel_features[RedChannel].sum_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenChannel].sum_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BlueChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_entropy[i]-= density_xy[x].direction[i].index* MagickLog10(density_xy[x].direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_entropy[i]-= density_xy[x].direction[i].opacity* MagickLog10(density_xy[x].direction[i].opacity); /* Sum variance. 
*/ channel_features[RedChannel].sum_variance[i]+= (x-channel_features[RedChannel].sum_entropy[i])* (x-channel_features[RedChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenChannel].sum_variance[i]+= (x-channel_features[GreenChannel].sum_entropy[i])* (x-channel_features[GreenChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BlueChannel].sum_variance[i]+= (x-channel_features[BlueChannel].sum_entropy[i])* (x-channel_features[BlueChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_variance[i]+= (x-channel_features[IndexChannel].sum_entropy[i])* (x-channel_features[IndexChannel].sum_entropy[i])* density_xy[x].direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_variance[i]+= (x-channel_features[OpacityChannel].sum_entropy[i])* (x-channel_features[OpacityChannel].sum_entropy[i])* density_xy[x].direction[i].opacity; } } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].index+=(y-mean.direction[i].index+1)* (y-mean.direction[i].index+1)*cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) variance.direction[i].opacity+=(y-mean.direction[i].opacity+1)* (y-mean.direction[i].opacity+1)* cooccurrence[x][y].direction[i].opacity; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_xy[MagickAbsoluteValue(y-x)].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; /* Information Measures of Correlation. 
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].index-=cooccurrence[x][y].direction[i].index* MagickLog10(cooccurrence[x][y].direction[i].index); if (image->matte != MagickFalse) entropy_xy.direction[i].opacity-= cooccurrence[x][y].direction[i].opacity*MagickLog10( cooccurrence[x][y].direction[i].opacity); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* MagickLog10(density_x[x].direction[i].red* density_y[y].direction[i].red)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* MagickLog10(density_x[x].direction[i].blue* density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].index-=( cooccurrence[x][y].direction[i].index*MagickLog10( density_x[x].direction[i].index*density_y[y].direction[i].index)); if (image->matte != MagickFalse) entropy_xy1.direction[i].opacity-=( cooccurrence[x][y].direction[i].opacity*MagickLog10( density_x[x].direction[i].opacity* density_y[y].direction[i].opacity)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*MagickLog10( density_x[x].direction[i].red*density_y[y].direction[i].red)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*MagickLog10( density_x[x].direction[i].green*density_y[y].direction[i].green)); entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* density_y[y].direction[i].blue*MagickLog10( 
density_x[x].direction[i].blue*density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].index-=(density_x[x].direction[i].index* density_y[y].direction[i].index*MagickLog10( density_x[x].direction[i].index*density_y[y].direction[i].index)); if (image->matte != MagickFalse) entropy_xy2.direction[i].opacity-=(density_x[x].direction[i].opacity* density_y[y].direction[i].opacity*MagickLog10( density_x[x].direction[i].opacity* density_y[y].direction[i].opacity)); } } channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BlueChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].index; if (image->matte != MagickFalse) channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].opacity; } /* Compute more texture features. */ (void) memset(&variance,0,sizeof(variance)); (void) memset(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].index+=density_xy[x].direction[i].index; if (image->matte != MagickFalse) variance.direction[i].opacity+=density_xy[x].direction[i].opacity; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].index+=density_xy[x].direction[i].index* density_xy[x].direction[i].index; if (image->matte != MagickFalse) sum_squares.direction[i].opacity+=density_xy[x].direction[i].opacity* density_xy[x].direction[i].opacity; /* Difference entropy. */ channel_features[RedChannel].difference_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenChannel].difference_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BlueChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].difference_entropy[i]-= density_xy[x].direction[i].index* MagickLog10(density_xy[x].direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].difference_entropy[i]-= density_xy[x].direction[i].opacity* MagickLog10(density_xy[x].direction[i].opacity); /* Information Measures of Correlation. 
*/ entropy_x.direction[i].red-=(density_x[x].direction[i].red* MagickLog10(density_x[x].direction[i].red)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* MagickLog10(density_x[x].direction[i].green)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* MagickLog10(density_x[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].index-=(density_x[x].direction[i].index* MagickLog10(density_x[x].direction[i].index)); if (image->matte != MagickFalse) entropy_x.direction[i].opacity-=(density_x[x].direction[i].opacity* MagickLog10(density_x[x].direction[i].opacity)); entropy_y.direction[i].red-=(density_y[x].direction[i].red* MagickLog10(density_y[x].direction[i].red)); entropy_y.direction[i].green-=(density_y[x].direction[i].green* MagickLog10(density_y[x].direction[i].green)); entropy_y.direction[i].blue-=(density_y[x].direction[i].blue* MagickLog10(density_y[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].index-=(density_y[x].direction[i].index* MagickLog10(density_y[x].direction[i].index)); if (image->matte != MagickFalse) entropy_y.direction[i].opacity-=(density_y[x].direction[i].opacity* MagickLog10(density_y[x].direction[i].opacity)); } /* Difference variance. 
*/ channel_features[RedChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BlueChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->matte != MagickFalse) channel_features[OpacityChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].opacity)- (variance.direction[i].opacity*variance.direction[i].opacity))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].index)- (variance.direction[i].index*variance.direction[i].index))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? 
entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BlueChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].index-entropy_xy1.direction[i].index)/ (entropy_x.direction[i].index > entropy_y.direction[i].index ? entropy_x.direction[i].index : entropy_y.direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].opacity-entropy_xy1.direction[i].opacity)/ (entropy_x.direction[i].opacity > entropy_y.direction[i].opacity ? entropy_x.direction[i].opacity : entropy_y.direction[i].opacity); channel_features[RedChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BlueChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].index- entropy_xy.direction[i].index))))); if (image->matte != MagickFalse) channel_features[OpacityChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].opacity- entropy_xy.direction[i].opacity))))); } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t z; for (z=0; z < (ssize_t) number_grays; z++) { register ssize_t y; ChannelStatistics pixel; (void) memset(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) pixel.direction[i].index+=cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) pixel.direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; } /* Maximum Correlation Coefficient. */ Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red* cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/ density_y[x].direction[i].red; Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green* cooccurrence[y][x].direction[i].green/ density_x[z].direction[i].green/density_y[x].direction[i].red; Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue* cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/ density_y[x].direction[i].blue; if (image->colorspace == CMYKColorspace) Q[z][y].direction[i].index+=cooccurrence[z][x].direction[i].index* cooccurrence[y][x].direction[i].index/ density_x[z].direction[i].index/density_y[x].direction[i].index; if (image->matte != MagickFalse) Q[z][y].direction[i].opacity+= cooccurrence[z][x].direction[i].opacity* cooccurrence[y][x].direction[i].opacity/ density_x[z].direction[i].opacity/ density_y[x].direction[i].opacity; } } channel_features[RedChannel].contrast[i]+=z*z*pixel.direction[i].red; 
channel_features[GreenChannel].contrast[i]+=z*z*pixel.direction[i].green; channel_features[BlueChannel].contrast[i]+=z*z*pixel.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackChannel].contrast[i]+=z*z* pixel.direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].contrast[i]+=z*z* pixel.direction[i].opacity; } /* Maximum Correlation Coefficient. Future: return second largest eigenvalue of Q. */ channel_features[RedChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[GreenChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[BlueChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->matte != MagickFalse) channel_features[OpacityChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); } /* Relinquish resources. */ sum=(ChannelStatistics *) RelinquishMagickMemory(sum); for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); return(channel_features); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H o u g h L i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use HoughLineImage() in conjunction with any binary edge extracted image (we % recommand Canny) to identify lines in the image. 
The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator matrix
% of angle vs distance.  The size of the accumulator is 180x(diagonal/2).  Next
% it searches this space for peaks in counts and converts the locations of the
% peaks to slope and intercept in the normal x,y input image space.  Use the
% slope/intercepts to find the endpoints clipped to the bounds of the image.  The
% lines are then drawn.  The counts are a measure of the length of the lines
%
% The format of the HoughLineImage method is:
%
%      Image *HoughLineImage(const Image *image,const size_t width,
%        const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o width, height: find line pairs as local maxima in this neighborhood.
%
%    o threshold: the line count threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Round x to the nearest integer.  Note: when the fraction is exactly 0.5,
  (x-floor(x)) < (ceil(x)-x) is false, so half-way values round up (ceil).
*/
static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

/*
  Render the MVG drawing primitives referenced by image_info (either a blob
  already attached to the image or the file named in image->filename) onto a
  fresh columns-by-rows canvas, scaled by the image resolution.  Used by
  HoughLineImage() to draw the detected line segments.  Returns the rendered
  image, or NULL on failure.
*/
static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
/* NOTE(review): BoundingBox is defined but not referenced in this function's
   visible body -- presumably kept for symmetry with the "viewbox" primitive
   written by HoughLineImage(); confirm before removing. */
#define BoundingBox  "viewbox"

  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open image.
  */
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  /*
    Scale the canvas by the image resolution (defaults to 1.0 when the
    resolution is unset).
  */
  draw_info->affine.sx=image->x_resolution == 0.0 ? 1.0 : image->x_resolution/
    DefaultResolution;
  draw_info->affine.sy=image->y_resolution == 0.0 ? 1.0 : image->y_resolution/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing.  The MVG source comes either from the file on disk or from
    the in-memory blob attached to the image.
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      draw_info->primitive=(char *) AcquireMagickMemory((size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) memcpy(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
      /* NOTE(review): if AcquireMagickMemory() fails, primitive stays NULL
         and DrawImage() is still called below -- presumably DrawImage()
         tolerates a NULL primitive; confirm. */
    }
  (void) DrawImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
  Hough line detection: accumulate votes for each (angle, distance) pair over
  the bright pixels of a binary edge image, locate local maxima above the
  threshold, convert each peak to a clipped line segment, write the segments
  as MVG primitives to a temporary file, and render them via
  RenderHoughLines().  Returns the rendered line image (optionally with the
  accumulator appended as a second frame), or NULL on failure.
*/
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag  "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MaxTextExtent],
    path[MaxTextExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  register ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator: 180 angle columns (1 degree steps) by
    2*hough_height distance rows, zero-initialized.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator: each pixel brighter than mid-gray votes once for
    every angle, at the signed distance of the (angle-oriented) line through
    it from the image center (offset by hough_height to keep indices
    non-negative).
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2.0))
        {
          register ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,HoughLineImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator: write MVG primitives (one "line"
    per surviving peak) to a unique temporary file that RenderHoughLines()
    consumes below.
  */
  file=AcquireUniqueFileResource(path);
  if (file == -1)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  (void) FormatLocaleString(message,MaxTextExtent,
    "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
    (double) height,(double) threshold);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MaxTextExtent,"viewbox 0 0 %.20g %.20g\n",
    (double) image->columns,(double) image->rows);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MaxTextExtent,
    "# x1,y1 x2,y2 # count angle distance\n");
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  /*
    Default vote threshold is a quarter of the longer image dimension unless
    the caller supplied one.
  */
  line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
  if (threshold != 0)
    line_count=threshold;
  for (y=0; y < (ssize_t) accumulator_height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) accumulator_width; x++)
    {
      double
        count;

      (void) GetMatrixElement(accumulator,x,y,&count);
      if (count >= (double) line_count)
        {
          double
            maxima;

          SegmentInfo
            line;

          ssize_t
            v;

          /*
            Is point a local maxima?  Scan the width x height neighborhood;
            any strictly larger neighbor disqualifies this cell.
          */
          maxima=count;
          for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
          {
            ssize_t
              u;

            for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
            {
              if ((u != 0) || (v !=0))
                {
                  (void) GetMatrixElement(accumulator,x+u,y+v,&count);
                  if (count > maxima)
                    {
                      maxima=count;
                      break;
                    }
                }
            }
            /* Propagate the inner break: u stops short of width/2 only when a
               larger neighbor was found. */
            if (u < (ssize_t) (width/2))
              break;
          }
          (void) GetMatrixElement(accumulator,x,y,&count);
          if (maxima > count)
            continue;
          /*
            Convert the (angle x, distance y) peak into a segment clipped to
            the image bounds.  Mostly-horizontal angles solve for y at the
            left/right edges; otherwise solve for x at the top/bottom edges to
            avoid dividing by a near-zero sin/cos.
          */
          if ((x >= 45) && (x <= 135))
            {
              /*
                y = (r-x cos(t))/sin(t)
              */
              line.x1=0.0;
              line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
              line.x2=(double) image->columns;
              line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
            }
          else
            {
              /*
                x = (r-y cos(t))/sin(t)
              */
              line.y1=0.0;
              line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
              line.y2=(double) image->rows;
              line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
            }
          (void) FormatLocaleString(message,MaxTextExtent,
            "line %g,%g %g,%g  # %g %g %g\n",line.x1,line.y1,line.x2,line.y2,
            maxima,(double) x,(double) y);
          if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
            status=MagickFalse;
        }
    }
  }
  (void) close(file);
  /*
    Render lines to image canvas, forwarding the relevant drawing artifacts
    from the source image as image options.
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MaxTextExtent,"%s",path);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception);
  /*
    Optionally append the raw accumulator as a second image when the
    "hough-lines:accumulator" artifact is set true.
  */
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsMagickTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M e a n S h i f t I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MeanShiftImage() delineate arbitrarily shaped clusters in the image.  For
%  each pixel, it visits all the pixels in the neighborhood specified by
%  the window centered at the pixel and excludes those that are outside the
%  radius=(window-1)/2 surrounding the pixel.  From those pixels, it finds
%  those that are within the specified color distance from the current mean,
%  and computes a new x,y centroid from those coordinates and a new mean.
%  This new x,y centroid is used as the center for a new window.  This process
%  iterates until it converges and the final mean replaces the (original
%  window center) pixel value.  It repeats this process for the next pixel,
%  etc., until it processes all pixels in the image.  Results are typically
%  better with colorspaces other than sRGB.  We recommend YIQ, YUV or YCbCr.
%
%  The format of the MeanShiftImage method is:
%
%      Image *MeanShiftImage(const Image *image,const size_t width,
%        const size_t height,const double color_distance,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width, height: find pixels in this neighborhood.
%
%    o color_distance: the color distance.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations  100
#define MeanShiftImageTag  "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&mean_image->exception);
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      MagickPixelPacket
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /*
        Iterate the mean-shift window from this pixel until the centroid and
        mean color converge (or MaxMeanShiftIterations is reached).
      */
      GetMagickPixelPacket(image,&mean_pixel);
      SetMagickPixelPacket(image,p,indexes+x,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        MagickPixelPacket
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetMagickPixelPacket(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            /* Restrict the window to an elliptical neighborhood. */
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelPacket
                  pixel;

                status=GetOneCacheViewVirtualPixel(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.opacity+=pixel.opacity;
                    count++;
                  }
              }
          }
        }
        /* NOTE(review): count >= 1 holds when color_distance >= 0 because the
           window center (u=v=0, distance 0) always qualifies; otherwise this
           divides by zero -- confirm callers never pass a negative
           color_distance. */
        gamma=1.0/count;
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.opacity=gamma*sum_pixel.opacity;
        /*
          Convergence test: squared spatial movement plus squared color
          movement (colors rescaled to a 0..255 range via QuantumScale).
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      q->red=ClampToQuantum(mean_pixel.red);
      q->green=ClampToQuantum(mean_pixel.green);
      q->blue=ClampToQuantum(mean_pixel.blue);
      q->opacity=ClampToQuantum(mean_pixel.opacity);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MeanShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
DRB038-truedepseconddimension-var-yes.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.

Produced at the Lawrence Livermore National Laboratory

Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)

LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
Only the outmost loop can be parallelized in this program.
Data race pair: b[i][j]@65:7 vs. b[i][j-1]@65:15

NOTE(review): as transcribed, the second loop nest parallelizes only the
outer i loop while the j loop (which carries the b[i][j] <- b[i][j-1]
dependence) runs serially, so the documented race pair would not occur;
confirm against the upstream DRB038 source, which places the work-sharing
construct on the inner j loop.  The pragma placement is deliberately left
untouched here -- this is a data-race benchmark and its race semantics must
not be silently altered.
*/
#include <stdio.h>   /* printf(); was missing, making its use an implicit
                        declaration (invalid in C99 and later) */
#include <stdlib.h>  /* atoi() */
#include <omp.h>

/*
  DataRaceBench kernel DRB038 (variable-length variant): fill an n-by-m
  matrix, propagate values along the second dimension, then print the
  result.  The optional first command-line argument overrides the default
  problem size of 1000.
*/
int main(int argc,char *argv[])
{
  int i;
  int j;
  int len = 1000;

  if (argc > 1)
    len = atoi(argv[1]);
  int n = len;
  int m = len;
  double b[n][m];

  /* Initialization: every b[i][j] is independent, so both loops are safely
     parallelizable. */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= m - 1; j += 1) {
      b[i][j] = (i + j);
    }
  }

  /* Propagation: the j loop carries a true dependence (b[i][j] reads
     b[i][j-1]); only the i loop may be parallelized. */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= n - 1; i += 1) {
    for (j = 1; j <= m - 1; j += 1) {
      b[i][j] = b[i][j - 1];
    }
  }

  for (i = 0; i <= n - 1; i += 1) {
    for (j = 0; j <= m - 1; j += 1) {
      printf("%lf\n",b[i][j]);
    }
  }
  return 0;
}
dgemm_blasfeo.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from compute/zgemm.c, normal z -> d, Thu Aug 8 10:18:22 2019
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

#include "blasfeo_d_aux.h"

/***************************************************************************//**
 *
 * @ingroup plasma_gemm
 *
 *  Performs one of the matrix-matrix operations
 *
 *          \f[ C = \alpha [op( A )\times op( B )] + \beta C, \f]
 *
 *  where op( X ) is one of:
 *          \f[ op( X ) = X,   \f]
 *          \f[ op( X ) = X^T, \f]
 *          \f[ op( X ) = X^T, \f]
 *
 *  (the complex template lists X^H for the third form; in this real-valued
 *  generated file conjugate-transpose degenerates to plain transpose)
 *
 *  alpha and beta are scalars, and A, B and C are matrices, with op( A )
 *  an m-by-k matrix, op( B ) a k-by-n matrix and C an m-by-n matrix.
 *
 *******************************************************************************
 *
 * @param[in] transa
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] transb
 *          - PlasmaNoTrans:   B is not transposed,
 *          - PlasmaTrans:     B is transposed,
 *          - PlasmaConjTrans: B is conjugate transposed.
 *
 * @param[in] m
 *          The number of rows of the matrix op( A ) and of the matrix C.
 *          m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix op( B ) and of the matrix C.
 *          n >= 0.
 *
 * @param[in] k
 *          The number of columns of the matrix op( A ) and the number of rows
 *          of the matrix op( B ). k >= 0.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] pA
 *          An lda-by-ka matrix, where ka is k when transa = PlasmaNoTrans,
 *          and is m otherwise.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          When transa = PlasmaNoTrans, lda >= max(1,m),
 *          otherwise, lda >= max(1,k).
 *
 * @param[in] pB
 *          An ldb-by-kb matrix, where kb is n when transb = PlasmaNoTrans,
 *          and is k otherwise.
 *
 * @param[in] ldb
 *          The leading dimension of the array B.
 *          When transb = PlasmaNoTrans, ldb >= max(1,k),
 *          otherwise, ldb >= max(1,n).
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] pC
 *          An ldc-by-n matrix. On exit, the array is overwritten by the m-by-n
 *          matrix ( alpha*op( A )*op( B ) + beta*C ).
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 *
 *******************************************************************************
 *
 * @sa plasma_omp_dgemm
 * @sa plasma_cgemm
 * @sa plasma_dgemm
 * @sa plasma_sgemm
 *
 ******************************************************************************/
int plasma_dgemm_blasfeo(plasma_enum_t transa, plasma_enum_t transb,
                 int m, int n, int k,
                 double alpha, double *pA, int lda,
                                           double *pB, int ldb,
                 double beta,  double *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.  On failure, return the negated 1-based index
    // of the offending argument (standard LAPACK/PLASMA convention).
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        return -1;
    }
    if ((transb != PlasmaNoTrans) &&
        (transb != PlasmaTrans) &&
        (transb != PlasmaConjTrans)) {
        plasma_error("illegal value of transb");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    if (k < 0) {
        plasma_error("illegal value of k");
        return -5;
    }

    // Actual storage dimensions of A and B depend on the transposition flags.
    int am, an;
    int bm, bn;
    if (transa == PlasmaNoTrans) {
        am = m;
        an = k;
    }
    else {
        am = k;
        an = m;
    }
    if (transb == PlasmaNoTrans) {
        bm = k;
        bn = n;
    }
    else {
        bm = n;
        bn = k;
    }

    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -8;
    }
    if (ldb < imax(1, bm)) {
        plasma_error("illegal value of ldb");
        return -10;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return -13;
    }

    // quick return: nothing to do when C is empty or the update is a no-op
    // (alpha*op(A)*op(B) vanishes and beta == 1).
    if (m == 0 || n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gemm(plasma, PlasmaRealDouble, m, n, k);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices (tile-layout descriptors with freshly
    // allocated storage; destroyed before returning).
    plasma_desc_t A;
    plasma_desc_t B;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        bm, bn, 0, 0, bm, bn, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize sequence.
    // NOTE(review): retval from the two init calls below is not checked —
    // presumably they cannot fail; confirm against the PLASMA API.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block: all calls inside only enqueue OpenMP tasks;
    // the implicit barrier at the end of the parallel region synchronizes.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc_blasfeo(pA, lda, A, &sequence, &request);
        plasma_omp_dge2desc_blasfeo(pB, ldb, B, &sequence, &request);
        plasma_omp_dge2desc_blasfeo(pC, ldc, C, &sequence, &request);

        //d_print_mat(am, an, pA, lda);
        //plasma_dprint_blasfeo(A);
        //exit(1);

        // Call the tile async function.
        plasma_omp_dgemm_blasfeo(transa, transb,
                         alpha, A,
                                B,
                         beta,  C,
                         &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge_blasfeo(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&C);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_gemm
 *
 *  Performs matrix multiplication.
 *  Non-blocking tile version of plasma_dgemm().
 *  May return before the computation is finished.
 *  Operates on matrices stored by tiles.
 *  All matrices are passed through descriptors.
 *  All dimensions are taken from the descriptors.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] transa
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] transb
 *          - PlasmaNoTrans:   B is not transposed,
 *          - PlasmaTrans:     B is transposed,
 *          - PlasmaConjTrans: B is conjugate transposed.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *
 * @param[in] B
 *          Descriptor of matrix B.
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] C
 *          Descriptor of matrix C.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).  Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_dgemm
 * @sa plasma_omp_cgemm
 * @sa plasma_omp_dgemm
 * @sa plasma_omp_sgemm
 *
 ******************************************************************************/
void plasma_omp_dgemm_blasfeo(plasma_enum_t transa, plasma_enum_t transb,
                      double alpha, plasma_desc_t A,
                                                plasma_desc_t B,
                      double beta,  plasma_desc_t C,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.  Async convention: report failures through
    // the sequence/request instead of a return code.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((transb != PlasmaNoTrans) &&
        (transb != PlasmaTrans) &&
        (transb != PlasmaConjTrans)) {
        plasma_error("illegal value of transb");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        // NOTE(review): plasma_request_fail is invoked with sequence == NULL
        // here — confirm it tolerates a NULL sequence pointer.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return: inner dimension taken from A's descriptor.
    int k = transa == PlasmaNoTrans ? A.n : A.m;
    if (C.m == 0 || C.n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return;

    // printf("%d %d\n",transa,transb);
    // Call the parallel function.
    plasma_pdgemm_blasfeo(transa, transb,
                  alpha, A,
                         B,
                  beta,  C,
                  sequence, request);
}
convolution_4x4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// Convolution with a 4x4 kernel and stride 4 (non-overlapping windows).
// Each output pixel is the dot product of one 4x4 input patch with the
// 16 weights of the (p, q) kernel slice, accumulated over input channels q,
// plus the per-output-channel bias.  Output channels are processed in
// parallel via OpenMP; within a row, the NEON paths compute 4 output
// pixels per inline-asm loop iteration, with a scalar/per-pixel tail.
static void conv4x4s4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After a row of output, r0..r3 sit 4*outw floats into their rows;
    // tailstep advances them past any row remainder plus 3 more input rows,
    // i.e. to the start of the next 4-row band (stride 4 vertically).
    const int tailstep = w - 4*outw + w*3;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        // Seed the whole output channel with the bias; the q loop below
        // accumulates each input channel's contribution on top.
        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            // 16 weights per (output channel, input channel) pair.
            const float* kernel0 = kernel + p*inch*16 + q*16;

            // Four consecutive input rows forming the current 4-row band.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

#if __ARM_NEON
            // One kernel row per vector register.
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0+4);
            float32x4_t _k891011 = vld1q_f32(kernel0+8);
            float32x4_t _k12131415 = vld1q_f32(kernel0+12);
#else
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;             // groups of 4 output pixels
                int remain = outw - (nn << 2);  // leftover pixels (< 4)
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                // Per loop iteration: four 4x4 patches -> four partial sums
                // (v5, v6, v14, v15), reduced pairwise with faddp into one
                // 4-lane result that is accumulated into outptr.
                asm volatile(
                    "prfm pldl1keep, [%1, #128] \n"
                    "0: \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v7.4s}, [%1] \n" // v7 = outptr
                    "ld1 {v8.4s}, [%2], #16 \n"// v8 = r0
                    "ld1 {v9.4s}, [%3], #16 \n"// v9 = r1
                    "prfm pldl1keep, [%4, #512] \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "fmul v12.4s, v8.4s, %12.4s \n"
                    "fmul v13.4s, v9.4s, %13.4s \n"
                    "ld1 {v10.4s}, [%4], #16 \n"// v10 = r2
                    "ld1 {v11.4s}, [%5], #16 \n"// v11 = r3
                    "fmla v12.4s, v10.4s, %14.4s \n"
                    "fmla v13.4s, v11.4s, %15.4s \n"
                    "fadd v5.4s, v12.4s, v13.4s \n"
                    "ld1 {v8.4s}, [%2], #16 \n"// v8 = r0
                    "ld1 {v9.4s}, [%3], #16 \n"// v9 = r1
                    "fmul v12.4s, v8.4s, %12.4s \n"
                    "fmul v13.4s, v9.4s, %13.4s \n"
                    "ld1 {v10.4s}, [%4], #16 \n"// v10 = r2
                    "ld1 {v11.4s}, [%5], #16 \n"// v11 = r3
                    "fmla v12.4s, v10.4s, %14.4s \n"
                    "fmla v13.4s, v11.4s, %15.4s \n"
                    "fadd v6.4s, v12.4s, v13.4s \n"
                    "ld1 {v8.4s}, [%2], #16 \n"// v8 = r0
                    "ld1 {v9.4s}, [%3], #16 \n"// v9 = r1
                    "fmul v12.4s, v8.4s, %12.4s \n"
                    "fmul v13.4s, v9.4s, %13.4s \n"
                    "ld1 {v10.4s}, [%4], #16 \n"// v10 = r2
                    "ld1 {v11.4s}, [%5], #16 \n"// v11 = r3
                    "fmla v12.4s, v10.4s, %14.4s \n"
                    "fmla v13.4s, v11.4s, %15.4s \n"
                    "fadd v14.4s, v12.4s, v13.4s \n"
                    "faddp v5.4s, v5.4s, v6.4s \n" // Move to here to enhance ILP
                    "ld1 {v8.4s}, [%2], #16 \n"// v8 = r0
                    "ld1 {v9.4s}, [%3], #16 \n"// v9 = r1
                    "fmul v12.4s, v8.4s, %12.4s \n"
                    "fmul v13.4s, v9.4s, %13.4s \n"
                    "ld1 {v10.4s}, [%4], #16 \n"// v10 = r2
                    "ld1 {v11.4s}, [%5], #16 \n"// v11 = r3
                    "fmla v12.4s, v10.4s, %14.4s \n"
                    "fmla v13.4s, v11.4s, %15.4s \n"
                    "fadd v15.4s, v12.4s, v13.4s \n"
                    // "faddp v5.4s , v5.4s, v6.4s \n" // Move this line upward.
                    "faddp v14.4s, v14.4s, v15.4s \n"
                    "faddp v5.4s , v5.4s, v14.4s \n"
                    "fadd v7.4s, v7.4s, v5.4s \n"
                    "st1 {v7.4s}, [%1], #16 \n"
                    "prfm pldl1keep, [%1, #128] \n"
                    "subs %w0, %w0, #1 \n"
                    "bne 0b \n"
                    : "=r"(nn),     // %0
                      "=r"(outptr), // %1
                      "=r"(r0),     // %2
                      "=r"(r1),     // %3
                      "=r"(r2),     // %4
                      "=r"(r3)      // %5
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "w"(_k0123),     // %12
                      "w"(_k4567),     // %13
                      "w"(_k891011),   // %14
                      "w"(_k12131415)  // %15
                    : "cc", "memory", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                // armv7 variant of the same 4-pixel kernel: partial sums in
                // q5, q6, q14, q15, reduced with vadd/vpadd into q5.
                asm volatile(
                    "pld [%1, #128] \n"
                    "0: \n"
                    "pld [%2, #512] \n"
                    "pld [%3, #512] \n"
                    "vld1.f32 {d14-d15}, [%1] \n"// q7 = outptr
                    "vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
                    "vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
                    "pld [%4, #512] \n"
                    "pld [%5, #512] \n"
                    "vmul.f32 q12, q8, %q12 \n"
                    "vmul.f32 q13, q9, %q13 \n"
                    "vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
                    "vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
                    "vmla.f32 q12, q10, %q14 \n"
                    "vmla.f32 q13, q11, %q15 \n"
                    "vadd.f32 q5, q12, q13 \n"
                    "vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
                    "vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
                    "vmul.f32 q12, q8, %q12 \n"
                    "vmul.f32 q13, q9, %q13 \n"
                    "vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
                    "vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
                    "vmla.f32 q12, q10, %q14 \n"
                    "vmla.f32 q13, q11, %q15 \n"
                    "vadd.f32 q6, q12, q13 \n"
                    "vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
                    "vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
                    "vmul.f32 q12, q8, %q12 \n"
                    "vmul.f32 q13, q9, %q13 \n"
                    "vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
                    "vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
                    "vmla.f32 q12, q10, %q14 \n"
                    "vmla.f32 q13, q11, %q15 \n"
                    "vadd.f32 q14, q12, q13 \n"
                    "vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
                    "vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
                    "vmul.f32 q12, q8, %q12 \n"
                    "vmul.f32 q13, q9, %q13 \n"
                    "vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
                    "vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
                    "vmla.f32 q12, q10, %q14 \n"
                    "vmla.f32 q13, q11, %q15 \n"
                    "vadd.f32 q15, q12, q13 \n"
                    "vadd.f32 d10, d10, d11 \n"
                    "vadd.f32 d28, d28, d29 \n"
                    "vadd.f32 d11, d12, d13 \n"
                    "vadd.f32 d29, d30, d31 \n"
                    "vpadd.f32 d10, d10, d11 \n"
                    "vpadd.f32 d11, d28, d29 \n"
                    "vadd.f32 q7, q7, q5 \n"
                    "vst1.f32 {d14-d15}, [%1]! \n"
                    "pld [%1, #128] \n"
                    "subs %0, #1 \n"
                    "bne 0b \n"
                    : "=r"(nn),     // %0
                      "=r"(outptr), // %1
                      "=r"(r0),     // %2
                      "=r"(r1),     // %3
                      "=r"(r2),     // %4
                      "=r"(r3)      // %5
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "w"(_k0123),     // %12
                      "w"(_k4567),     // %13
                      "w"(_k891011),   // %14
                      "w"(_k12131415)  // %15
                    : "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Tail: one output pixel at a time (one 4x4 patch dot 16 weights).
                for (; remain>0; remain--)
                {
#if __ARM_NEON
#if __aarch64__
                    float sum = 0.f;

                    asm volatile(
                        "ld1 {v8.4s}, [%0], #16 \n"// v8 = r0
                        "ld1 {v9.4s}, [%1], #16 \n"// v9 = r1
                        "fmul v12.4s, v8.4s, %9.4s \n"
                        "fmul v13.4s, v9.4s, %10.4s \n"
                        "ld1 {v10.4s}, [%2], #16 \n"// v10 = r2
                        "ld1 {v11.4s}, [%3], #16 \n"// v11 = r3
                        "fmla v12.4s, v10.4s, %11.4s \n"
                        "fmla v13.4s, v11.4s, %12.4s \n"
                        "fadd v5.4s, v12.4s, v13.4s \n"
                        "faddp v5.4s, v5.4s, v5.4s \n"
                        "faddp s5, v5.2s \n"
                        "fmov %w4, s5 \n"
                        : "=r"(r0),  // %0
                          "=r"(r1),  // %1
                          "=r"(r2),  // %2
                          "=r"(r3),  // %3
                          "=r"(sum)  // %4
                        : "0"(r0),
                          "1"(r1),
                          "2"(r2),
                          "3"(r3),
                          "w"(_k0123),     // %9
                          "w"(_k4567),     // %10
                          "w"(_k891011),   // %11
                          "w"(_k12131415)  // %12
                        : "cc", "memory", "v5", "v6", "v8", "v9", "v10", "v11", "v12", "v13"
                    );

                    *outptr += sum;
#else
                    float sum = 0.f;

                    asm volatile(
                        "vld1.f32 {d16-d17}, [%0]! \n"// q8 = r0
                        "vld1.f32 {d18-d19}, [%1]! \n"// q9 = r1
                        "vmul.f32 q12, q8, %q9 \n"
                        "vmul.f32 q13, q9, %q10 \n"
                        "vld1.f32 {d20-d21}, [%2]! \n"// q10 = r2
                        "vld1.f32 {d22-d23}, [%3]! \n"// q11 = r3
                        "vmla.f32 q12, q10, %q11 \n"
                        "vmla.f32 q13, q11, %q12 \n"
                        "vadd.f32 q5, q12, q13 \n"
                        "vadd.f32 d10, d10, d11 \n"
                        "vpadd.f32 d10, d10, d10 \n"
                        "vmov.f32 %4, d10[0] \n"
                        : "=r"(r0),  // %0
                          "=r"(r1),  // %1
                          "=r"(r2),  // %2
                          "=r"(r3),  // %3
                          "=r"(sum)  // %4
                        : "0"(r0),
                          "1"(r1),
                          "2"(r2),
                          "3"(r3),
                          "w"(_k0123),     // %9
                          "w"(_k4567),     // %10
                          "w"(_k891011),   // %11
                          "w"(_k12131415)  // %12
                        : "cc", "memory", "q5", "q6", "q8", "q9", "q10", "q11", "q12", "q13"
                    );

                    *outptr += sum;
#endif // __aarch64__
#else
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];

                    *outptr += sum;

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
#endif // __ARM_NEON
                    outptr++;
                }

                // Jump all four row pointers to the next 4-row input band.
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }
    }
}
omp-expand.c
/* Expansion pass for OMP directives.  Outlines regions of certain OMP
   directives to separate functions, converts others into explicit calls to the
   runtime library (libgomp) and so forth

Copyright (C) 2005-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "internal-fn.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "splay-tree.h"
#include "cfgloop.h"
#include "omp-general.h"
#include "omp-offload.h"
#include "tree-cfgcleanup.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"
#include "stringpool.h"
#include "attribs.h"
#include "tree-eh.h"

/* OMP region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding GIMPLE_OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the GIMPLE_OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the GIMPLE_OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for GIMPLE_OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* Schedule modifiers.  */
  unsigned char sched_modifiers;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;

  /* Copy of fd.lastprivate_conditional != 0.  */
  bool has_lastprivate_conditional;

  /* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has
     a depend clause.  */
  gomp_ordered *ord_stmt;
};

/* Root of the tree of omp regions discovered in the current function;
   regions at this level are linked through their NEXT field.  */
static struct omp_region *root_omp_region;
static bool omp_any_child_fn_dumped;

/* Forward declarations; note the defaulted trailing argument on
   expand_omp_build_assign.  */
static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree,
				     bool = false);
static gphi *find_phi_with_arg_on_edge (tree, edge);
static void expand_omp (struct omp_region *region);

/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.
In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple *ws_stmt = last_stmt (ws_entry_bb);

  /* Sections carry no loop bounds, so nothing can block combining.  */
  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
  if (gimple_omp_for_kind (ws_stmt) != GF_OMP_FOR_KIND_FOR)
    return false;

  omp_extract_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  /* The combined GOMP_parallel_loop_* entry points take 'long' bounds.  */
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Adjust CHUNK_SIZE from SCHEDULE clause, depending on simd modifier
   presence (SIMD_SCHEDULE).  Rounds CHUNK_SIZE up to a multiple of the
   maximum vectorization factor when the simd modifier is present.  */

static tree
omp_adjust_chunk_size (tree chunk_size, bool simd_schedule)
{
  if (!simd_schedule || integer_zerop (chunk_size))
    return chunk_size;

  poly_uint64 vf = omp_max_vf ();
  if (known_eq (vf, 1U))
    return chunk_size;

  /* chunk_size = (chunk_size + vf - 1) & -vf, i.e. round up to
     a multiple of VF (VF is a power of two).  */
  tree type = TREE_TYPE (chunk_size);
  chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size,
			    build_int_cst (type, vf - 1));
  return fold_build2 (BIT_AND_EXPR, type, chunk_size,
		      build_int_cst (type, -vf));
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  For a loop: n1, n2, step and optionally the chunk size
   (all converted to 'long'); for sections: the section count.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple *par_stmt, gimple *ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      omp_extract_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (for_stmt))
	{
	  /* For a loop combined into the parallel, the bounds live in
	     _looptemp_ clauses on the parallel (first is n1, second n2).  */
	  tree innerc
	    = omp_find_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  t = omp_adjust_chunk_size (t, fd.simd_schedule);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}

/* Discover whether REGION is a combined parallel+workshare region.
   On success sets is_combined_parallel on REGION and its inner region
   and records the extra library-call arguments in REGION->ws_args.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  /* Give up for task reductions on the parallel, while it is implementable,
     adding another big set of APIs or slowing down the normal paths is
     not acceptable.  */
  tree pclauses = gimple_omp_parallel_clauses (last_stmt (par_entry_bb));
  if (omp_find_clause (pclauses, OMP_CLAUSE__REDUCTEMP_))
    return;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple *par_stmt = last_stmt (par_entry_bb);
      gimple *ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = omp_find_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK)
		  == OMP_CLAUSE_SCHEDULE_STATIC)
	      || omp_find_clause (clauses, OMP_CLAUSE_ORDERED)
	      || omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_)
	      || ((c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_))
		  && POINTER_TYPE_P (TREE_TYPE (OMP_CLAUSE_DECL (c)))))
	    return;
	}
      else if (region->inner->type == GIMPLE_OMP_SECTIONS
	       && (omp_find_clause (gimple_omp_sections_clauses (ws_stmt),
				    OMP_CLAUSE__REDUCTEMP_)
		   || omp_find_clause (gimple_omp_sections_clauses (ws_stmt),
				       OMP_CLAUSE__CONDTEMP_)))
	return;

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}

/* Debugging dumps for parallel regions.
*/

void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  Children are
   indented four columns deeper; peers recurse at the same indent.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at STMT inside region PARENT.
   The new region is pushed onto the front of PARENT's inner list, or
   onto ROOT_OMP_REGION when there is no parent.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  /* Grab each child's successor before freeing it.  */
  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
omp_free_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gcond *
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}

/* Change DECL_CONTEXT of CHILD_FNDECL to that of the parent function.
   Add CHILD_FNDECL to decl chain of the supercontext of the block
   ENTRY_BLOCK - this is the block which originally contained the
   code from which CHILD_FNDECL was created.

   Together, these actions ensure that the debug info for the outlined
   function will be emitted with the correct lexical scope.  */

static void
adjust_context_and_scope (struct omp_region *region, tree entry_block,
			  tree child_fndecl)
{
  tree parent_fndecl = NULL_TREE;
  gimple *entry_stmt;

  /* OMP expansion expands inner regions before outer ones, so if
     we e.g. have explicit task region nested in parallel region, when
     expanding the task region current_function_decl will be the original
     source function, but we actually want to use as context the child
     function of the parallel.
*/ for (region = region->outer; region && parent_fndecl == NULL_TREE; region = region->outer) switch (region->type) { case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_TASK: case GIMPLE_OMP_TEAMS: entry_stmt = last_stmt (region->entry); parent_fndecl = gimple_omp_taskreg_child_fn (entry_stmt); break; case GIMPLE_OMP_TARGET: entry_stmt = last_stmt (region->entry); parent_fndecl = gimple_omp_target_child_fn (as_a <gomp_target *> (entry_stmt)); break; default: break; } if (parent_fndecl == NULL_TREE) parent_fndecl = current_function_decl; DECL_CONTEXT (child_fndecl) = parent_fndecl; if (entry_block != NULL_TREE && TREE_CODE (entry_block) == BLOCK) { tree b = BLOCK_SUPERCONTEXT (entry_block); if (TREE_CODE (b) == BLOCK) { DECL_CHAIN (child_fndecl) = BLOCK_VARS (b); BLOCK_VARS (b) = child_fndecl; } } } /* Build the function calls to GOMP_parallel etc to actually generate the parallel operation. REGION is the parallel region being expanded. BB is the block where to insert the code. WS_ARGS will be set if this is a call to a combined parallel+workshare construct, it contains the list of additional arguments needed by the workshare construct. */ static void expand_parallel_call (struct omp_region *region, basic_block bb, gomp_parallel *entry_stmt, vec<tree, va_gc> *ws_args) { tree t, t1, t2, val, cond, c, clauses, flags; gimple_stmt_iterator gsi; gimple *stmt; enum built_in_function start_ix; int start_ix2; location_t clause_loc; vec<tree, va_gc> *args; clauses = gimple_omp_parallel_clauses (entry_stmt); /* Determine what flavor of GOMP_parallel we will be emitting. 
*/ start_ix = BUILT_IN_GOMP_PARALLEL; tree rtmp = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_); if (rtmp) start_ix = BUILT_IN_GOMP_PARALLEL_REDUCTIONS; else if (is_combined_parallel (region)) { switch (region->inner->type) { case GIMPLE_OMP_FOR: gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO); switch (region->inner->sched_kind) { case OMP_CLAUSE_SCHEDULE_RUNTIME: /* For lastprivate(conditional:), our implementation requires monotonic behavior. */ if (region->inner->has_lastprivate_conditional != 0) start_ix2 = 3; else if ((region->inner->sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) != 0) start_ix2 = 6; else if ((region->inner->sched_modifiers & OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0) start_ix2 = 7; else start_ix2 = 3; break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: case OMP_CLAUSE_SCHEDULE_GUIDED: if ((region->inner->sched_modifiers & OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0 && !region->inner->has_lastprivate_conditional) { start_ix2 = 3 + region->inner->sched_kind; break; } /* FALLTHRU */ default: start_ix2 = region->inner->sched_kind; break; } start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC; start_ix = (enum built_in_function) start_ix2; break; case GIMPLE_OMP_SECTIONS: start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS; break; default: gcc_unreachable (); } } /* By default, the value of NUM_THREADS is zero (selected at run time) and there is no conditional. */ cond = NULL_TREE; val = build_int_cst (unsigned_type_node, 0); flags = build_int_cst (unsigned_type_node, 0); c = omp_find_clause (clauses, OMP_CLAUSE_IF); if (c) cond = OMP_CLAUSE_IF_EXPR (c); c = omp_find_clause (clauses, OMP_CLAUSE_NUM_THREADS); if (c) { val = OMP_CLAUSE_NUM_THREADS_EXPR (c); clause_loc = OMP_CLAUSE_LOCATION (c); } else clause_loc = gimple_location (entry_stmt); c = omp_find_clause (clauses, OMP_CLAUSE_PROC_BIND); if (c) flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c)); /* Ensure 'val' is of the correct type. 
*/ val = fold_convert_loc (clause_loc, unsigned_type_node, val); /* If we found the clause 'if (cond)', build either (cond != 0) or (cond ? val : 1u). */ if (cond) { cond = gimple_boolify (cond); if (integer_zerop (val)) val = fold_build2_loc (clause_loc, EQ_EXPR, unsigned_type_node, cond, build_int_cst (TREE_TYPE (cond), 0)); else { basic_block cond_bb, then_bb, else_bb; edge e, e_then, e_else; tree tmp_then, tmp_else, tmp_join, tmp_var; tmp_var = create_tmp_var (TREE_TYPE (val)); if (gimple_in_ssa_p (cfun)) { tmp_then = make_ssa_name (tmp_var); tmp_else = make_ssa_name (tmp_var); tmp_join = make_ssa_name (tmp_var); } else { tmp_then = tmp_var; tmp_else = tmp_var; tmp_join = tmp_var; } e = split_block_after_labels (bb); cond_bb = e->src; bb = e->dest; remove_edge (e); then_bb = create_empty_bb (cond_bb); else_bb = create_empty_bb (then_bb); set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb); set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb); stmt = gimple_build_cond_empty (cond); gsi = gsi_start_bb (cond_bb); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (then_bb); expand_omp_build_assign (&gsi, tmp_then, val, true); gsi = gsi_start_bb (else_bb); expand_omp_build_assign (&gsi, tmp_else, build_int_cst (unsigned_type_node, 1), true); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); add_bb_to_loop (then_bb, cond_bb->loop_father); add_bb_to_loop (else_bb, cond_bb->loop_father); e_then = make_edge (then_bb, bb, EDGE_FALLTHRU); e_else = make_edge (else_bb, bb, EDGE_FALLTHRU); if (gimple_in_ssa_p (cfun)) { gphi *phi = create_phi_node (tmp_join, bb); add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION); add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION); } val = tmp_join; } gsi = gsi_start_bb (bb); val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } gsi = gsi_last_nondebug_bb (bb); t = gimple_omp_parallel_data_arg (entry_stmt); if (t == 
NULL) t1 = null_pointer_node; else t1 = build_fold_addr_expr (t); tree child_fndecl = gimple_omp_parallel_child_fn (entry_stmt); t2 = build_fold_addr_expr (child_fndecl); vec_alloc (args, 4 + vec_safe_length (ws_args)); args->quick_push (t2); args->quick_push (t1); args->quick_push (val); if (ws_args) args->splice (*ws_args); args->quick_push (flags); t = build_call_expr_loc_vec (UNKNOWN_LOCATION, builtin_decl_explicit (start_ix), args); if (rtmp) { tree type = TREE_TYPE (OMP_CLAUSE_DECL (rtmp)); t = build2 (MODIFY_EXPR, type, OMP_CLAUSE_DECL (rtmp), fold_convert (type, fold_convert (pointer_sized_int_node, t))); } force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } /* Build the function call to GOMP_task to actually generate the task operation. BB is the block where to insert the code. */ static void expand_task_call (struct omp_region *region, basic_block bb, gomp_task *entry_stmt) { tree t1, t2, t3; gimple_stmt_iterator gsi; location_t loc = gimple_location (entry_stmt); tree clauses = gimple_omp_task_clauses (entry_stmt); tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF); tree untied = omp_find_clause (clauses, OMP_CLAUSE_UNTIED); tree mergeable = omp_find_clause (clauses, OMP_CLAUSE_MERGEABLE); tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND); tree finalc = omp_find_clause (clauses, OMP_CLAUSE_FINAL); tree priority = omp_find_clause (clauses, OMP_CLAUSE_PRIORITY); unsigned int iflags = (untied ? GOMP_TASK_FLAG_UNTIED : 0) | (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0) | (depend ? 
GOMP_TASK_FLAG_DEPEND : 0); bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt); tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE; tree num_tasks = NULL_TREE; bool ull = false; if (taskloop_p) { gimple *g = last_stmt (region->outer->entry); gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR && gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP); struct omp_for_data fd; omp_extract_for_data (as_a <gomp_for *> (g), &fd, NULL); startvar = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); endvar = omp_find_clause (OMP_CLAUSE_CHAIN (startvar), OMP_CLAUSE__LOOPTEMP_); startvar = OMP_CLAUSE_DECL (startvar); endvar = OMP_CLAUSE_DECL (endvar); step = fold_convert_loc (loc, fd.iter_type, fd.loop.step); if (fd.loop.cond_code == LT_EXPR) iflags |= GOMP_TASK_FLAG_UP; tree tclauses = gimple_omp_for_clauses (g); num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_NUM_TASKS); if (num_tasks) num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks); else { num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_GRAINSIZE); if (num_tasks) { iflags |= GOMP_TASK_FLAG_GRAINSIZE; num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks); } else num_tasks = integer_zero_node; } num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks); if (ifc == NULL_TREE) iflags |= GOMP_TASK_FLAG_IF; if (omp_find_clause (tclauses, OMP_CLAUSE_NOGROUP)) iflags |= GOMP_TASK_FLAG_NOGROUP; ull = fd.iter_type == long_long_unsigned_type_node; if (omp_find_clause (clauses, OMP_CLAUSE_REDUCTION)) iflags |= GOMP_TASK_FLAG_REDUCTION; } else if (priority) iflags |= GOMP_TASK_FLAG_PRIORITY; tree flags = build_int_cst (unsigned_type_node, iflags); tree cond = boolean_true_node; if (ifc) { if (taskloop_p) { tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc)); t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t, build_int_cst (unsigned_type_node, GOMP_TASK_FLAG_IF), build_int_cst (unsigned_type_node, 0)); flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t); } else cond = gimple_boolify 
(OMP_CLAUSE_IF_EXPR (ifc)); } if (finalc) { tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc)); t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t, build_int_cst (unsigned_type_node, GOMP_TASK_FLAG_FINAL), build_int_cst (unsigned_type_node, 0)); flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t); } if (depend) depend = OMP_CLAUSE_DECL (depend); else depend = build_int_cst (ptr_type_node, 0); if (priority) priority = fold_convert (integer_type_node, OMP_CLAUSE_PRIORITY_EXPR (priority)); else priority = integer_zero_node; gsi = gsi_last_nondebug_bb (bb); tree t = gimple_omp_task_data_arg (entry_stmt); if (t == NULL) t2 = null_pointer_node; else t2 = build_fold_addr_expr_loc (loc, t); t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt)); t = gimple_omp_task_copy_fn (entry_stmt); if (t == NULL) t3 = null_pointer_node; else t3 = build_fold_addr_expr_loc (loc, t); if (taskloop_p) t = build_call_expr (ull ? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL) : builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP), 11, t1, t2, t3, gimple_omp_task_arg_size (entry_stmt), gimple_omp_task_arg_align (entry_stmt), flags, num_tasks, priority, startvar, endvar, step); else t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK), 9, t1, t2, t3, gimple_omp_task_arg_size (entry_stmt), gimple_omp_task_arg_align (entry_stmt), cond, flags, depend, priority); force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } /* Build the function call to GOMP_taskwait_depend to actually generate the taskwait operation. BB is the block where to insert the code. 
*/

static void
expand_taskwait_call (basic_block bb, gomp_task *entry_stmt)
{
  tree clauses = gimple_omp_task_clauses (entry_stmt);
  tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
  /* Only "taskwait depend(...)" needs a runtime call here.  */
  if (depend == NULL_TREE)
    return;

  depend = OMP_CLAUSE_DECL (depend);

  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
  tree t
    = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT_DEPEND),
		       1, depend);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}

/* Build the function call to GOMP_teams_reg to actually
   generate the host teams operation.  REGION is the teams region
   being expanded.  BB is the block where to insert the code.  */

static void
expand_teams_call (basic_block bb, gomp_teams *entry_stmt)
{
  tree clauses = gimple_omp_teams_clauses (entry_stmt);
  /* Zero means "chosen by the runtime" for both NUM_TEAMS and
     THREAD_LIMIT.  */
  tree num_teams = omp_find_clause (clauses, OMP_CLAUSE_NUM_TEAMS);
  if (num_teams == NULL_TREE)
    num_teams = build_int_cst (unsigned_type_node, 0);
  else
    {
      num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
      num_teams = fold_convert (unsigned_type_node, num_teams);
    }
  tree thread_limit = omp_find_clause (clauses, OMP_CLAUSE_THREAD_LIMIT);
  if (thread_limit == NULL_TREE)
    thread_limit = build_int_cst (unsigned_type_node, 0);
  else
    {
      thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
      thread_limit = fold_convert (unsigned_type_node, thread_limit);
    }

  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
  tree t = gimple_omp_teams_data_arg (entry_stmt), t1;
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  tree child_fndecl = gimple_omp_teams_child_fn (entry_stmt);
  tree t2 = build_fold_addr_expr (child_fndecl);

  vec<tree, va_gc> *args;
  vec_alloc (args, 5);
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (num_teams);
  args->quick_push (thread_limit);
  /* For future extensibility.  */
  args->quick_push (build_zero_cst (unsigned_type_node));

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
			       builtin_decl_explicit (BUILT_IN_GOMP_TEAMS_REG),
			       args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}

/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (vec<tree, va_gc> *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  /* Iterate in reverse so the resulting chain preserves the vector's
     original order.  */
  FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}

/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple *stmt;
  /* Computed lazily below; -1 means "not determined yet".  */
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_nondebug_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev_nondebug (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_nondebug_bb (e->src);
      if (gsi_end_p (gsi))
	continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
	  && !gimple_omp_return_nowait_p (stmt))
	{
	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
	     in many cases.  If there could be tasks queued, the barrier
	     might be needed to let the tasks run before some local
	     variable of the parallel that the task uses as shared
	     runs out of scope.  The task can be spawned either
	     from within current function (this would be easy to check)
	     or from some function it calls and gets passed an address
	     of such a variable.  */
	  if (any_addressable_vars < 0)
	    {
	      /* Scan the child function's locals and the enclosing
	         BLOCK scopes for any addressable variable; if none
	         exists, no task can be holding a shared reference.  */
	      gomp_parallel *parallel_stmt
		= as_a <gomp_parallel *> (last_stmt (region->entry));
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls, block, decl;
	      unsigned ix;

	      any_addressable_vars = 0;
	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
		if (TREE_ADDRESSABLE (decl))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = DECL_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}

/* Walk the whole region tree, removing removable exit barriers from
   every parallel region.  */

static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}

/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.
*/

static void
optimize_omp_library_calls (gimple *entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && omp_find_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *call = gsi_stmt (gsi);
	tree decl;

	/* Only consider calls to external public functions without a
	   body - i.e. the library entry points themselves.  */
	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num ()
		   can change during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
	    else
	      continue;

	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    /* The declaration's signature must match the builtin's,
	       otherwise the replacement would be unsafe.  */
	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}

/* Callback for expand_omp_build_assign.  Return non-NULL if *tp needs
   to be regimplified.  */

static tree
expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

/* Prepend or append TO = FROM assignment before or after *GSI_P.  */

static void
expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from,
			 bool after)
{
  bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
  from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
				   !after, after ? GSI_CONTINUE_LINKING
						 : GSI_SAME_STMT);
  gimple *stmt = gimple_build_assign (to, from);
  if (after)
    gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING);
  else
    gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
  /* Regimplify if either operand contains a DECL_VALUE_EXPR variable
     (see expand_omp_regimplify_p).  */
  if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
      || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gimple_regimplify_operands (stmt, &gsi);
    }
}

/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gimple *entry_stmt, *stmt;
  edge e;
  vec<tree, va_gc> *ws_args;

  entry_stmt = last_stmt (region->entry);
  /* "taskwait depend(...)" is represented as a GIMPLE_OMP_TASK but
     only needs a runtime call, not an outlined child function.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
      && gimple_omp_task_taskwait_p (entry_stmt))
    {
      new_bb = region->entry;
      gsi = gsi_last_nondebug_bb (region->entry);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);
      expand_taskwait_call (new_bb, as_a <gomp_task *> (entry_stmt));
      return;
    }

  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  entry_bb = region->entry;
  if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
    exit_bb = region->cont;
  else
    exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_nondebug_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TEAMS);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb
	    = single_succ_p (entry_bb) ? single_succ (entry_bb)
				       : FALLTHRU_EDGE (entry_bb)->dest;
	  tree arg;
	  gimple *parcopy_stmt = NULL;

	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gimple *stmt;

	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && (TREE_OPERAND (arg, 0)
			  == gimple_omp_taskreg_data_arg (entry_stmt)))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      tree lhs = gimple_assign_lhs (parcopy_stmt);
	      gcc_assert (SSA_NAME_VAR (lhs) == arg);
	      /* We'd like to set the rhs to the default def in the child_fn,
		 but it's too early to create ssa names in the child_fn.
		 Instead, we set the rhs to the parm.  In
		 move_sese_region_to_fn, we introduce a default def for the
		 parm, map the parm to it's default def, and once we encounter
		 this stmt, replace the parm with the default def.  */
	      gimple_assign_set_rhs1 (parcopy_stmt, arg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
	  varpool_node::finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_nondebug_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK
			   || gimple_code (stmt) == GIMPLE_OMP_TEAMS));
      e = split_block (entry_bb, stmt);
      gsi_remove (&gsi, true);
      entry_bb = e->dest;
      edge e2 = NULL;
      if (gimple_code (entry_stmt) != GIMPLE_OMP_TASK)
	single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
      else
	{
	  /* For a task, the parent continues past the region: add an
	     abnormal edge to the region exit and drop the branch edge
	     into the (to-be-outlined) task body.  */
	  e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
	  gcc_assert (e2->dest == region->exit);
	  remove_edge (BRANCH_EDGE (entry_bb));
	  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
	  gsi = gsi_last_nondebug_bb (region->exit);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  gsi_remove (&gsi, true);
	}

      /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_nondebug_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && (gimple_code (gsi_stmt (gsi))
			  == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  init_tree_ssa (child_cfun);
	  init_ssa_operands (child_cfun);
	  child_cfun->gimple_df->in_ssa_p = true;
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      if (e2)
	{
	  basic_block dest_bb = e2->dest;
	  if (!exit_bb)
	    make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
	  remove_edge (e2);
	  set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
	}
      /* When the OMP expansion process cannot guarantee an up-to-date
	 loop tree arrange for the child function to fixup loops.  */
      if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = vec_safe_length (child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = (*child_cfun->local_decls)[srcidx];
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    (*child_cfun->local_decls)[dstidx] = t;
	  dstidx++;
	}
      if (dstidx != num)
	vec_safe_truncate (child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      child_cfun->curr_properties = cfun->curr_properties;
      child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
      child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
      cgraph_node *node = cgraph_node::get_create (child_fn);
      node->parallelized_function = 1;
      cgraph_node::add_new_function (child_fn, true);

      bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
		      && !DECL_ASSEMBLER_NAME_SET_P (child_fn);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      if (need_asm)
	assign_assembler_name_if_needed (child_fn);

      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      update_max_bb_count ();
      cgraph_edge::rebuild_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB_FN (bb, cfun)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	verify_loop_structure ();
      pop_cfun ();

      if (dump_file && !gimple_in_ssa_p (cfun))
	{
	  omp_any_child_fn_dumped = true;
	  dump_function_header (dump_file, child_fn, dump_flags);
	  dump_function_to_file (child_fn, dump_file, dump_flags);
	}
    }

  adjust_context_and_scope (region, gimple_block (entry_stmt), child_fn);

  /* Emit the library call that launches the outlined region.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb,
			  as_a <gomp_parallel *> (entry_stmt), ws_args);
  else if (gimple_code (entry_stmt) == GIMPLE_OMP_TEAMS)
    expand_teams_call (new_bb, as_a <gomp_teams *> (entry_stmt));
  else
    expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt));
  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}

/* Information about members of an OpenACC collapsed loop nest.  */

struct oacc_collapse
{
  tree base;	/* Base value.  */
  tree iters;	/* Number of steps.  */
  tree step;	/* Step size.  */
  tree tile;	/* Tile increment (if tiled).  */
  tree outer;	/* Tile iterator var.  */
};

/* Helper for expand_oacc_for.  Determine collapsed loop information.
   Fill in COUNTS array.  Emit any initialization code before GSI.
   Return the calculated outer loop bound of BOUND_TYPE.  */

static tree
expand_oacc_collapse_init (const struct omp_for_data *fd,
			   gimple_stmt_iterator *gsi,
			   oacc_collapse *counts, tree bound_type,
			   location_t loc)
{
  tree tiling = fd->tiling;
  tree total = build_int_cst (bound_type, 1);
  int ix;

  gcc_assert (integer_onep (fd->loop.step));
  gcc_assert (integer_zerop (fd->loop.n1));

  /* When tiling, the first operand of the tile clause applies to the
     innermost loop, and we work outwards from there.  Seems
     backwards, but whatever.  */
  for (ix = fd->collapse; ix--;)
    {
      const omp_for_data_loop *loop = &fd->loops[ix];

      tree iter_type = TREE_TYPE (loop->v);
      tree diff_type = iter_type;
      tree plus_type = iter_type;

      gcc_assert (loop->cond_code == fd->loop.cond_code);

      /* Pointer iteration uses sizetype offsets and a signed type for
	 differences.  */
      if (POINTER_TYPE_P (iter_type))
	plus_type = sizetype;
      if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
	diff_type = signed_type_for (diff_type);
      if (TYPE_PRECISION (diff_type) < TYPE_PRECISION (integer_type_node))
	diff_type = integer_type_node;

      if (tiling)
	{
	  tree num = build_int_cst (integer_type_node, fd->collapse);
	  tree loop_no = build_int_cst (integer_type_node, ix);
	  tree tile = TREE_VALUE (tiling);
	  gcall *call
	    = gimple_build_call_internal (IFN_GOACC_TILE, 5, num, loop_no,
					  tile,
					  /* gwv-outer=*/integer_zero_node,
					  /* gwv-inner=*/integer_zero_node);

	  counts[ix].outer = create_tmp_var (iter_type, ".outer");
	  counts[ix].tile = create_tmp_var (diff_type, ".tile");
	  gimple_call_set_lhs (call, counts[ix].tile);
	  gimple_set_location (call, loc);
	  gsi_insert_before (gsi, call, GSI_SAME_STMT);

	  tiling = TREE_CHAIN (tiling);
	}
      else
	{
	  counts[ix].tile = NULL;
	  counts[ix].outer = loop->v;
	}

      tree b = loop->n1;
      tree e = loop->n2;
      tree s = loop->step;
      bool up = loop->cond_code == LT_EXPR;
      tree dir = build_int_cst (diff_type, up ? +1 : -1);
      bool negating;
      tree expr;

      b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE,
				    true, GSI_SAME_STMT);

      /* Convert the step, avoiding possible unsigned->signed overflow.  */
      negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
      if (negating)
	s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
      s = fold_convert (diff_type, s);
      if (negating)
	s = fold_build1 (NEGATE_EXPR, diff_type, s);
      s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE,
				    true, GSI_SAME_STMT);

      /* Determine the range, avoiding possible unsigned->signed overflow.  */
      negating = !up && TYPE_UNSIGNED (iter_type);
      expr = fold_build2 (MINUS_EXPR, plus_type,
			  fold_convert (plus_type, negating ? b : e),
			  fold_convert (plus_type, negating ? e : b));
      expr = fold_convert (diff_type, expr);
      if (negating)
	expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
      tree range = force_gimple_operand_gsi (gsi, expr, true,
					     NULL_TREE, true, GSI_SAME_STMT);

      /* Determine number of iterations: (range - dir + s) / s, i.e. a
	 round-toward-zero division after biasing toward DIR.  */
      expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
      expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
      expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);

      tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
					     true, GSI_SAME_STMT);

      counts[ix].base = b;
      counts[ix].iters = iters;
      counts[ix].step = s;

      total = fold_build2 (MULT_EXPR, bound_type, total,
			   fold_convert (bound_type, iters));
    }

  return total;
}

/* Emit initializers for collapsed loop members.  INNER is true if
   this is for the element loop of a TILE.  IVAR is the outer
   loop iteration variable, from which collapsed loop iteration values
   are calculated.  COUNTS array has been initialized by
   expand_oacc_collapse_init.  */

static void
expand_oacc_collapse_vars (const struct omp_for_data *fd, bool inner,
			   gimple_stmt_iterator *gsi,
			   const oacc_collapse *counts, tree ivar)
{
  tree ivar_type = TREE_TYPE (ivar);

  /* The most rapidly changing iteration variable is the innermost
     one.  */
  for (int ix = fd->collapse; ix--;)
    {
      const omp_for_data_loop *loop = &fd->loops[ix];
      const oacc_collapse *collapse = &counts[ix];
      tree v = inner ? loop->v : collapse->outer;
      tree iter_type = TREE_TYPE (v);
      tree diff_type = TREE_TYPE (collapse->step);
      tree plus_type = iter_type;
      enum tree_code plus_code = PLUS_EXPR;
      tree expr;

      if (POINTER_TYPE_P (iter_type))
	{
	  plus_code = POINTER_PLUS_EXPR;
	  plus_type = sizetype;
	}

      /* Decompose IVAR: this loop's index is IVAR % iters, and the
	 remaining outer loops continue with IVAR / iters.  */
      expr = ivar;
      if (ix)
	{
	  tree mod = fold_convert (ivar_type, collapse->iters);
	  ivar = fold_build2 (TRUNC_DIV_EXPR, ivar_type, expr, mod);
	  expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, expr, mod);
	  ivar = force_gimple_operand_gsi (gsi, ivar, true, NULL_TREE,
					   true, GSI_SAME_STMT);
	}

      /* V = base + index * step (tile element loops start from the
	 tile's outer iterator instead of the loop base).  */
      expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr),
			  collapse->step);
      expr = fold_build2 (plus_code, iter_type,
			  inner ? collapse->outer : collapse->base,
			  fold_convert (plus_type, expr));
      expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE,
				       true, GSI_SAME_STMT);
      gassign *ass = gimple_build_assign (v, expr);
      gsi_insert_before (gsi, ass, GSI_SAME_STMT);
    }
}

/* Helper function for expand_omp_{for_*,simd}.  If this is the outermost
   of the combined collapse > 1 loop constructs, generate code like:
	if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
   Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
	count = 0;
   and set ZERO_ITER_BB to that bb.  If this isn't the outermost
   of the combined loop constructs, just initialize COUNTS array
   from the _looptemp_ clauses.  For loop nests with non-rectangular
   loops, do this only for the rectangular loops.
Then pick the loops which reference outer vars in their bound expressions
   and the loops which they refer to and for this sub-nest compute
   number of iterations.  For triangular loops use Faulhaber's formula,
   otherwise as a fallback, compute by iterating the loops.
   If e.g. the sub-nest is
	for (I = N11; I COND1 N12; I += STEP1)
	for (J = M21 * I + N21; J COND2 M22 * I + N22; J += STEP2)
	for (K = M31 * J + N31; K COND3 M32 * J + N32; K += STEP3)
   do:
	COUNT = 0;
	for (tmpi = N11; tmpi COND1 N12; tmpi += STEP1)
	for (tmpj = M21 * tmpi + N21;
	     tmpj COND2 M22 * tmpi + N22; tmpj += STEP2)
	  {
	    int tmpk1 = M31 * tmpj + N31;
	    int tmpk2 = M32 * tmpj + N32;
	    if (tmpk1 COND3 tmpk2)
	      {
		if (COND3 is <)
		  adj = STEP3 - 1;
		else
		  adj = STEP3 + 1;
		COUNT += (adj + tmpk2 - tmpk1) / STEP3;
	      }
	  }
   and finally multiply the counts of the rectangular loops not
   in the sub-nest with COUNT.  Also, as counts[fd->last_nonrect]
   stores the number of iterations of the loops from fd->first_nonrect
   to fd->last_nonrect inclusive, i.e. the above COUNT multiplied
   by the counts of rectangular loops not referenced in any
   non-rectangular loops sandwiched in between those.  */

/* NOTE: It *could* be better to moosh all of the BBs together,
   creating one larger BB with all the computation and the unexpected
   jump at the end.  I.e.

   bool zero3, zero2, zero1, zero;

   zero3 = N32 c3 N31;
   count3 = (N32 - N31) /[cl] STEP3;
   zero2 = N22 c2 N21;
   count2 = (N22 - N21) /[cl] STEP2;
   zero1 = N12 c1 N11;
   count1 = (N12 - N11) /[cl] STEP1;
   zero = zero3 || zero2 || zero1;
   count = count1 * count2 * count3;
   if (__builtin_expect(zero, false)) goto zero_iter_bb;

   After all, we expect the zero=false, and thus we expect to have to
   evaluate all of the comparison expressions, so short-circuiting
   oughtn't be a win.  Since the condition isn't protecting a
   denominator, we're not concerned about divide-by-zero, so we can
   fully evaluate count even if a numerator turned out to be wrong.
   It seems like putting this all together would create much better
   scheduling opportunities, and less pressure on the chip's branch
   predictor.  */

static void
expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
			    basic_block &entry_bb, tree *counts,
			    basic_block &zero_iter1_bb, int &first_zero_iter1,
			    basic_block &zero_iter2_bb, int &first_zero_iter2,
			    basic_block &l2_dom_bb)
{
  tree t, type = TREE_TYPE (fd->loop.v);
  edge e, ne;
  int i;

  /* Collapsed loops need work for expansion into SSA form.  */
  gcc_assert (!gimple_in_ssa_p (cfun));

  if (gimple_omp_for_combined_into_p (fd->for_stmt)
      && TREE_CODE (fd->loop.n2) != INTEGER_CST)
    {
      gcc_assert (fd->ordered == 0);
      /* First two _looptemp_ clauses are for istart/iend, counts[0]
	 isn't supposed to be handled, as the inner loop doesn't
	 use it.  */
      tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				     OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      for (i = 0; i < fd->collapse; i++)
	{
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  if (i)
	    counts[i] = OMP_CLAUSE_DECL (innerc);
	  else
	    counts[0] = NULL_TREE;
	}
      /* For the closed-form (triangular) non-rectangular case four
	 extra _looptemp_ clauses carry precomputed values from the
	 outer construct.  */
      if (fd->non_rect
	  && fd->last_nonrect == fd->first_nonrect + 1
	  && !TYPE_UNSIGNED (TREE_TYPE (fd->loops[fd->last_nonrect].v)))
	{
	  tree c[4];
	  for (i = 0; i < 4; i++)
	    {
	      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
					OMP_CLAUSE__LOOPTEMP_);
	      gcc_assert (innerc);
	      c[i] = OMP_CLAUSE_DECL (innerc);
	    }
	  counts[0] = c[0];
	  fd->first_inner_iterations = c[1];
	  fd->factor = c[2];
	  fd->adjn1 = c[3];
	}
      return;
    }

  /* For the doacross loops past the collapsed ones, if any loop can be
     proven at compile time to iterate zero times, all of those extra
     counts are zero.  */
  for (i = fd->collapse; i < fd->ordered; i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);
      counts[i] = NULL_TREE;
      t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
		       fold_convert (itype, fd->loops[i].n1),
		       fold_convert (itype, fd->loops[i].n2));
      if (t && integer_zerop (t))
	{
	  for (i = fd->collapse; i < fd->ordered; i++)
	    counts[i] = build_int_cst (type, 0);
	  break;
	}
    }
  bool rect_count_seen = false;
  for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);

      if (i >= fd->collapse && counts[i])
	continue;
      if (fd->non_rect)
	{
	  /* Skip loops that use outer iterators in their expressions
	     during this phase.  */
	  if (fd->loops[i].m1 || fd->loops[i].m2)
	    {
	      counts[i] = build_zero_cst (type);
	      continue;
	    }
	}
      /* Unless the loop condition is known true at compile time, emit
	 a runtime check and branch to the shared zero-iteration block
	 when the loop body would not execute.  */
      if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse)
	  && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
				fold_convert (itype, fd->loops[i].n1),
				fold_convert (itype, fd->loops[i].n2)))
	      == NULL_TREE || !integer_onep (t)))
	{
	  gcond *cond_stmt;
	  tree n1, n2;
	  n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
	  n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
					 true, GSI_SAME_STMT);
	  n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
	  n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
					 true, GSI_SAME_STMT);
	  cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
					 NULL_TREE, NULL_TREE);
	  gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
	  if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
			 expand_omp_regimplify_p, NULL, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
			    expand_omp_regimplify_p, NULL, NULL))
	    {
	      *gsi = gsi_for_stmt (cond_stmt);
	      gimple_regimplify_operands (cond_stmt, gsi);
	    }
	  e = split_block (entry_bb, cond_stmt);
	  /* Collapsed loops share one zero-iteration block, the
	     doacross loops past them share another.  */
	  basic_block &zero_iter_bb
	    = i < fd->collapse ? zero_iter1_bb : zero_iter2_bb;
	  int &first_zero_iter
	    = i < fd->collapse ? first_zero_iter1 : first_zero_iter2;
	  if (zero_iter_bb == NULL)
	    {
	      gassign *assign_stmt;
	      first_zero_iter = i;
	      zero_iter_bb = create_empty_bb (entry_bb);
	      add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
	      *gsi = gsi_after_labels (zero_iter_bb);
	      if (i < fd->collapse)
		assign_stmt = gimple_build_assign (fd->loop.n2,
						   build_zero_cst (type));
	      else
		{
		  counts[i] = create_tmp_reg (type, ".count");
		  assign_stmt
		    = gimple_build_assign (counts[i], build_zero_cst (type));
		}
	      gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
	      set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
				       entry_bb);
	    }
	  ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
	  ne->probability = profile_probability::very_unlikely ();
	  e->flags = EDGE_TRUE_VALUE;
	  e->probability = ne->probability.invert ();
	  if (l2_dom_bb == NULL)
	    l2_dom_bb = entry_bb;
	  entry_bb = e->dest;
	  *gsi = gsi_last_nondebug_bb (entry_bb);
	}
      /* counts[i] = (step + adj + n2 - n1) / step, where adj is -1 for
	 '<' loops and +1 otherwise, computed in a signed variant of the
	 iterator type.  */
      if (POINTER_TYPE_P (itype))
	itype = signed_type_for (itype);
      t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				 ? -1 : 1));
      t = fold_build2 (PLUS_EXPR, itype,
		       fold_convert (itype, fd->loops[i].step), t);
      t = fold_build2 (PLUS_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].n2));
      t = fold_build2 (MINUS_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].n1));
      /* ?? We could probably use CEIL_DIV_EXPR instead of
	 TRUNC_DIV_EXPR and adjusting by hand.  Unless we can't
	 generate the same code in the end because generically we
	 don't know that the values involved must be negative for
	 GT??  */
      if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	t = fold_build2 (TRUNC_DIV_EXPR, itype,
			 fold_build1 (NEGATE_EXPR, itype, t),
			 fold_build1 (NEGATE_EXPR, itype,
				      fold_convert (itype,
						    fd->loops[i].step)));
      else
	t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			 fold_convert (itype, fd->loops[i].step));
      t = fold_convert (type, t);
      if (TREE_CODE (t) == INTEGER_CST)
	counts[i] = t;
      else
	{
	  if (i < fd->collapse || i != first_zero_iter2)
	    counts[i] = create_tmp_reg (type, ".count");
	  expand_omp_build_assign (gsi, counts[i], t);
	}
      /* Accumulate the product of the rectangular counts into
	 fd->loop.n2; non-rectangular loops are folded in later.  */
      if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse)
	{
	  if (fd->non_rect && i >= fd->first_nonrect && i <= fd->last_nonrect)
	    continue;
	  if (!rect_count_seen)
	    {
	      t = counts[i];
	      rect_count_seen = true;
	    }
	  else
	    t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
	  expand_omp_build_assign (gsi, fd->loop.n2, t);
	}
    }
  if (fd->non_rect && SSA_VAR_P (fd->loop.n2))
    {
      gcc_assert (fd->last_nonrect != -1);

      counts[fd->last_nonrect] = create_tmp_reg (type, ".count");
      expand_omp_build_assign (gsi, counts[fd->last_nonrect],
			       build_zero_cst (type));
      for (i = fd->first_nonrect + 1; i < fd->last_nonrect; i++)
	if (fd->loops[i].m1
	    || fd->loops[i].m2
	    || fd->loops[i].non_rect_referenced)
	  break;
      /* Closed-form (Faulhaber) path: a single signed non-rectangular
	 loop whose bounds depend directly on the first_nonrect loop.  */
      if (i == fd->last_nonrect
	  && fd->loops[i].outer == fd->last_nonrect - fd->first_nonrect
	  && !TYPE_UNSIGNED (TREE_TYPE (fd->loops[i].v)))
	{
	  int o = fd->first_nonrect;
	  tree itype = TREE_TYPE (fd->loops[o].v);
	  tree n1o = create_tmp_reg (itype, ".n1o");
	  t = fold_convert (itype, unshare_expr (fd->loops[o].n1));
	  expand_omp_build_assign (gsi, n1o, t);
	  tree n2o = create_tmp_reg (itype, ".n2o");
	  t = fold_convert (itype, unshare_expr (fd->loops[o].n2));
	  expand_omp_build_assign (gsi, n2o, t);
	  /* m2minusm1 is the per-outer-iteration change of the inner
	     range length.  */
	  if (fd->loops[i].m1 && fd->loops[i].m2)
	    t = fold_build2 (MINUS_EXPR, itype, unshare_expr (fd->loops[i].m2),
			     unshare_expr (fd->loops[i].m1));
	  else if (fd->loops[i].m1)
	    t = fold_unary (NEGATE_EXPR, itype,
			    unshare_expr (fd->loops[i].m1));
	  else
	    t = unshare_expr (fd->loops[i].m2);
	  tree m2minusm1
	    = force_gimple_operand_gsi (gsi, t, true, NULL_TREE,
					true, GSI_SAME_STMT);

	  gimple_stmt_iterator gsi2 = *gsi;
	  gsi_prev (&gsi2);
	  e = split_block (entry_bb, gsi_stmt (gsi2));
	  e = split_block (e->dest, (gimple *) NULL);
	  basic_block bb1 = e->src;
	  entry_bb = e->dest;
	  *gsi = gsi_after_labels (entry_bb);

	  gsi2 = gsi_after_labels (bb1);
	  /* outer_niters = trip count of the outer loop;
	     last = value of the outer iterator in its final
	     iteration.  */
	  tree ostep = fold_convert (itype, fd->loops[o].step);
	  t = build_int_cst (itype, (fd->loops[o].cond_code
				     == LT_EXPR ? -1 : 1));
	  t = fold_build2 (PLUS_EXPR, itype, ostep, t);
	  t = fold_build2 (PLUS_EXPR, itype, t, n2o);
	  t = fold_build2 (MINUS_EXPR, itype, t, n1o);
	  if (TYPE_UNSIGNED (itype) && fd->loops[o].cond_code == GT_EXPR)
	    t = fold_build2 (TRUNC_DIV_EXPR, itype,
			     fold_build1 (NEGATE_EXPR, itype, t),
			     fold_build1 (NEGATE_EXPR, itype, ostep));
	  else
	    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, ostep);
	  tree outer_niters
	    = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  t = fold_build2 (MINUS_EXPR, itype, outer_niters,
			   build_one_cst (itype));
	  t = fold_build2 (MULT_EXPR, itype, t, ostep);
	  t = fold_build2 (PLUS_EXPR, itype, n1o, t);
	  tree last = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
						true, GSI_SAME_STMT);
	  /* n1/n2 are the inner bounds in the first outer iteration,
	     n1e/n2e in the last one.  */
	  tree n1, n2, n1e, n2e;
	  t = fold_convert (itype, unshare_expr (fd->loops[i].n1));
	  if (fd->loops[i].m1)
	    {
	      n1 = fold_convert (itype, unshare_expr (fd->loops[i].m1));
	      n1 = fold_build2 (MULT_EXPR, itype, n1o, n1);
	      n1 = fold_build2 (PLUS_EXPR, itype, n1, t);
	    }
	  else
	    n1 = t;
	  n1 = force_gimple_operand_gsi (&gsi2, n1, true, NULL_TREE,
					 true, GSI_SAME_STMT);
	  t = fold_convert (itype, unshare_expr (fd->loops[i].n2));
	  if (fd->loops[i].m2)
	    {
	      n2 = fold_convert (itype, unshare_expr (fd->loops[i].m2));
	      n2 = fold_build2 (MULT_EXPR, itype, n1o, n2);
	      n2 = fold_build2 (PLUS_EXPR, itype, n2, t);
	    }
	  else
	    n2 = t;
	  n2 = force_gimple_operand_gsi (&gsi2, n2, true, NULL_TREE,
					 true, GSI_SAME_STMT);
	  t = fold_convert (itype, unshare_expr (fd->loops[i].n1));
	  if (fd->loops[i].m1)
	    {
	      n1e = fold_convert (itype, unshare_expr (fd->loops[i].m1));
	      n1e = fold_build2 (MULT_EXPR, itype, last, n1e);
	      n1e = fold_build2 (PLUS_EXPR, itype, n1e, t);
	    }
	  else
	    n1e = t;
	  n1e = force_gimple_operand_gsi (&gsi2, n1e, true, NULL_TREE,
					  true, GSI_SAME_STMT);
	  t = fold_convert (itype, unshare_expr (fd->loops[i].n2));
	  if (fd->loops[i].m2)
	    {
	      n2e = fold_convert (itype, unshare_expr (fd->loops[i].m2));
	      n2e = fold_build2 (MULT_EXPR, itype, last, n2e);
	      n2e = fold_build2 (PLUS_EXPR, itype, n2e, t);
	    }
	  else
	    n2e = t;
	  n2e = force_gimple_operand_gsi (&gsi2, n2e, true, NULL_TREE,
					  true, GSI_SAME_STMT);
	  /* Fast path requires the inner loop to be non-empty in both
	     the first and the last outer iteration; otherwise fall
	     through to the adjustment blocks below.  */
	  gcond *cond_stmt
	    = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
				 NULL_TREE, NULL_TREE);
	  gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
	  e = split_block (bb1, cond_stmt);
	  e->flags = EDGE_TRUE_VALUE;
	  e->probability = profile_probability::likely ().guessed ();
	  basic_block bb2 = e->dest;
	  gsi2 = gsi_after_labels (bb2);

	  cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1e, n2e,
					 NULL_TREE, NULL_TREE);
	  gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
	  e = split_block (bb2, cond_stmt);
	  e->flags = EDGE_TRUE_VALUE;
	  e->probability = profile_probability::likely ().guessed ();
	  gsi2 = gsi_after_labels (e->dest);

	  /* COUNT = outer_niters * first_inner_iterations
		     + factor * outer_niters * (outer_niters - 1) / 2.  */
	  tree step = fold_convert (itype, fd->loops[i].step);
	  t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				     ? -1 : 1));
	  t = fold_build2 (PLUS_EXPR, itype, step, t);
	  t = fold_build2 (PLUS_EXPR, itype, t, n2);
	  t = fold_build2 (MINUS_EXPR, itype, t, n1);
	  if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2 (TRUNC_DIV_EXPR, itype,
			     fold_build1 (NEGATE_EXPR, itype, t),
			     fold_build1 (NEGATE_EXPR, itype, step));
	  else
	    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
	  tree first_inner_iterations
	    = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  t = fold_build2 (MULT_EXPR, itype, m2minusm1, ostep);
	  if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2 (TRUNC_DIV_EXPR, itype,
			     fold_build1 (NEGATE_EXPR, itype, t),
			     fold_build1 (NEGATE_EXPR, itype, step));
	  else
	    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
	  tree factor
	    = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  t = fold_build2 (MINUS_EXPR, itype, outer_niters,
			   build_one_cst (itype));
	  t = fold_build2 (MULT_EXPR, itype, t, outer_niters);
	  t = fold_build2 (RSHIFT_EXPR, itype, t, integer_one_node);
	  t = fold_build2 (MULT_EXPR, itype, factor, t);
	  t = fold_build2 (PLUS_EXPR, itype,
			   fold_build2 (MULT_EXPR, itype, outer_niters,
					first_inner_iterations), t);
	  expand_omp_build_assign (&gsi2, counts[fd->last_nonrect],
				   fold_convert (type, t));

	  basic_block bb3 = create_empty_bb (bb1);
	  add_bb_to_loop (bb3, bb1->loop_father);

	  e = make_edge (bb1, bb3, EDGE_FALSE_VALUE);
	  e->probability = profile_probability::unlikely ().guessed ();

	  gsi2 = gsi_after_labels (bb3);
	  cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1e, n2e,
					 NULL_TREE, NULL_TREE);
	  gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
	  e = split_block (bb3, cond_stmt);
	  e->flags = EDGE_TRUE_VALUE;
	  e->probability = profile_probability::likely ().guessed ();
	  basic_block bb4 = e->dest;

	  ne = make_edge (bb3, entry_bb, EDGE_FALSE_VALUE);
	  ne->probability = e->probability.invert ();

	  basic_block bb5 = create_empty_bb (bb2);
	  add_bb_to_loop (bb5, bb2->loop_father);

	  ne = make_edge (bb2, bb5, EDGE_FALSE_VALUE);
	  ne->probability = profile_probability::unlikely ().guessed ();

	  /* Adjust n1o (j == 0, empty first inner loop) or n2o (j == 1,
	     empty last inner loop) towards the point where the inner
	     loop becomes non-empty, then retry from bb1.  */
	  for (int j = 0; j < 2; j++)
	    {
	      gsi2 = gsi_after_labels (j ? bb5 : bb4);
	      t = fold_build2 (MINUS_EXPR, itype,
			       unshare_expr (fd->loops[i].n1),
			       unshare_expr (fd->loops[i].n2));
	      t = fold_build2 (TRUNC_DIV_EXPR, itype, t, m2minusm1);
	      tree tem
		= force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
					    true, GSI_SAME_STMT);
	      t = fold_build2 (MINUS_EXPR, itype, tem, n1o);
	      t = fold_build2 (TRUNC_MOD_EXPR, itype, t, ostep);
	      t = fold_build2 (MINUS_EXPR, itype, tem, t);
	      tem = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
					      true, GSI_SAME_STMT);
	      t = fold_convert (itype, unshare_expr (fd->loops[i].n1));
	      if (fd->loops[i].m1)
		{
		  n1 = fold_convert (itype, unshare_expr (fd->loops[i].m1));
		  n1 = fold_build2 (MULT_EXPR, itype, tem, n1);
		  n1 = fold_build2 (PLUS_EXPR, itype, n1, t);
		}
	      else
		n1 = t;
	      n1 = force_gimple_operand_gsi (&gsi2, n1, true, NULL_TREE,
					     true, GSI_SAME_STMT);
	      t = fold_convert (itype, unshare_expr (fd->loops[i].n2));
	      if (fd->loops[i].m2)
		{
		  n2 = fold_convert (itype, unshare_expr (fd->loops[i].m2));
		  n2 = fold_build2 (MULT_EXPR, itype, tem, n2);
		  n2 = fold_build2 (PLUS_EXPR, itype, n2, t);
		}
	      else
		n2 = t;
	      n2 = force_gimple_operand_gsi (&gsi2, n2, true, NULL_TREE,
					     true, GSI_SAME_STMT);
	      expand_omp_build_assign (&gsi2, j ? n2o : n1o, tem);

	      cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
					     NULL_TREE, NULL_TREE);
	      gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
	      e = split_block (gsi_bb (gsi2), cond_stmt);
	      e->flags = j ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE;
	      e->probability = profile_probability::unlikely ().guessed ();
	      ne = make_edge (e->src, bb1,
			      j ? EDGE_FALSE_VALUE : EDGE_TRUE_VALUE);
	      ne->probability = e->probability.invert ();
	      gsi2 = gsi_after_labels (e->dest);

	      t = fold_build2 (PLUS_EXPR, itype, tem, ostep);
	      expand_omp_build_assign (&gsi2, j ? n2o : n1o, t);

	      make_edge (e->dest, bb1, EDGE_FALLTHRU);
	    }

	  set_immediate_dominator (CDI_DOMINATORS, bb3, bb1);
	  set_immediate_dominator (CDI_DOMINATORS, bb5, bb2);
	  set_immediate_dominator (CDI_DOMINATORS, entry_bb, bb1);

	  if (fd->first_nonrect + 1 == fd->last_nonrect)
	    {
	      fd->first_inner_iterations = first_inner_iterations;
	      fd->factor = factor;
	      fd->adjn1 = n1o;
	    }
	}
      else
	{
	  /* Fallback implementation.  Evaluate the loops with m1/m2
	     non-NULL as well as their outer loops at runtime using
	     temporaries instead of the original iteration variables,
	     and in the body just bump the counter.  */
	  gimple_stmt_iterator gsi2 = *gsi;
	  gsi_prev (&gsi2);
	  e = split_block (entry_bb, gsi_stmt (gsi2));
	  e = split_block (e->dest, (gimple *) NULL);
	  basic_block cur_bb = e->src;
	  basic_block next_bb = e->dest;
	  entry_bb = e->dest;
	  *gsi = gsi_after_labels (entry_bb);

	  tree *vs = XALLOCAVEC (tree, fd->last_nonrect);
	  memset (vs, 0, fd->last_nonrect * sizeof (tree));

	  for (i = 0; i <= fd->last_nonrect; i++)
	    {
	      if (fd->loops[i].m1 == NULL_TREE
		  && fd->loops[i].m2 == NULL_TREE
		  && !fd->loops[i].non_rect_referenced)
		continue;

	      tree itype = TREE_TYPE (fd->loops[i].v);

	      gsi2 = gsi_after_labels (cur_bb);
	      tree n1, n2;
	      t = fold_convert (itype, unshare_expr (fd->loops[i].n1));
	      if (fd->loops[i].m1)
		{
		  n1 = fold_convert (itype, unshare_expr (fd->loops[i].m1));
		  n1 = fold_build2 (MULT_EXPR, itype,
				    vs[i - fd->loops[i].outer], n1);
		  n1 = fold_build2 (PLUS_EXPR, itype, n1, t);
		}
	      else
		n1 = t;
	      n1 = force_gimple_operand_gsi (&gsi2, n1, true, NULL_TREE,
					     true, GSI_SAME_STMT);
	      if (i < fd->last_nonrect)
		{
		  vs[i] = create_tmp_reg (itype, ".it");
		  expand_omp_build_assign (&gsi2, vs[i], n1);
		}
	      t = fold_convert (itype, unshare_expr (fd->loops[i].n2));
	      if (fd->loops[i].m2)
		{
		  n2 = fold_convert (itype, unshare_expr (fd->loops[i].m2));
		  n2 = fold_build2 (MULT_EXPR, itype,
				    vs[i - fd->loops[i].outer], n2);
		  n2 = fold_build2 (PLUS_EXPR, itype, n2, t);
		}
	      else
		n2 = t;
	      n2 = force_gimple_operand_gsi (&gsi2, n2, true, NULL_TREE,
					     true, GSI_SAME_STMT);
	      if (i == fd->last_nonrect)
		{
		  /* Innermost loop: instead of iterating it, add its
		     trip count for this set of outer values to the
		     running COUNT.  */
		  gcond *cond_stmt
		    = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
					 NULL_TREE, NULL_TREE);
		  gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
		  e = split_block (cur_bb, cond_stmt);
		  e->flags = EDGE_TRUE_VALUE;
		  ne = make_edge (cur_bb, next_bb, EDGE_FALSE_VALUE);
		  e->probability = profile_probability::likely ().guessed ();
		  ne->probability = e->probability.invert ();
		  gsi2 = gsi_after_labels (e->dest);

		  t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
					     ? -1 : 1));
		  t = fold_build2 (PLUS_EXPR, itype,
				   fold_convert (itype, fd->loops[i].step), t);
		  t = fold_build2 (PLUS_EXPR, itype, t, n2);
		  t = fold_build2 (MINUS_EXPR, itype, t, n1);
		  tree step = fold_convert (itype, fd->loops[i].step);
		  if (TYPE_UNSIGNED (itype)
		      && fd->loops[i].cond_code == GT_EXPR)
		    t = fold_build2 (TRUNC_DIV_EXPR, itype,
				     fold_build1 (NEGATE_EXPR, itype, t),
				     fold_build1 (NEGATE_EXPR, itype, step));
		  else
		    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
		  t = fold_convert (type, t);
		  t = fold_build2 (PLUS_EXPR, type,
				   counts[fd->last_nonrect], t);
		  t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
						true, GSI_SAME_STMT);
		  expand_omp_build_assign (&gsi2, counts[fd->last_nonrect], t);
		  e = make_edge (e->dest, next_bb, EDGE_FALLTHRU);
		  set_immediate_dominator (CDI_DOMINATORS, next_bb, cur_bb);
		  break;
		}
	      /* Non-innermost loop: build the increment + loop-back
		 test blocks so the nest is actually iterated.  */
	      e = split_block (cur_bb, last_stmt (cur_bb));

	      basic_block new_cur_bb = create_empty_bb (cur_bb);
	      add_bb_to_loop (new_cur_bb, cur_bb->loop_father);

	      gsi2 = gsi_after_labels (e->dest);
	      tree step = fold_convert (itype,
					unshare_expr (fd->loops[i].step));
	      t = fold_build2 (PLUS_EXPR, itype, vs[i], step);
	      t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
					    true, GSI_SAME_STMT);
	      expand_omp_build_assign (&gsi2, vs[i], t);

	      ne = split_block (e->dest, last_stmt (e->dest));
	      gsi2 = gsi_after_labels (ne->dest);

	      gcond *cond_stmt
		= gimple_build_cond (fd->loops[i].cond_code, vs[i], n2,
				     NULL_TREE, NULL_TREE);
	      gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
	      edge e3, e4;
	      if (next_bb == entry_bb)
		{
		  e3 = find_edge (ne->dest, next_bb);
		  e3->flags = EDGE_FALSE_VALUE;
		}
	      else
		e3 = make_edge (ne->dest, next_bb, EDGE_FALSE_VALUE);
	      e4 = make_edge (ne->dest, new_cur_bb, EDGE_TRUE_VALUE);
	      e4->probability = profile_probability::likely ().guessed ();
	      e3->probability = e4->probability.invert ();
	      basic_block esrc = e->src;
	      make_edge (e->src, ne->dest, EDGE_FALLTHRU);
	      cur_bb = new_cur_bb;
	      basic_block latch_bb = next_bb;
	      next_bb = e->dest;
	      remove_edge (e);
	      set_immediate_dominator (CDI_DOMINATORS, ne->dest, esrc);
	      set_immediate_dominator (CDI_DOMINATORS, latch_bb, ne->dest);
	      set_immediate_dominator (CDI_DOMINATORS, cur_bb, ne->dest);
	    }
	}
      /* Multiply COUNT by the counts of rectangular, unreferenced loops
	 sandwiched inside the non-rectangular sub-nest.  */
      t = NULL_TREE;
      for (i = fd->first_nonrect; i < fd->last_nonrect; i++)
	if (!fd->loops[i].non_rect_referenced
	    && fd->loops[i].m1 == NULL_TREE
	    && fd->loops[i].m2 == NULL_TREE)
	  {
	    if (t == NULL_TREE)
	      t = counts[i];
	    else
	      t = fold_build2 (MULT_EXPR, type, t, counts[i]);
	  }
      if (t)
	{
	  t = fold_build2 (MULT_EXPR, type, counts[fd->last_nonrect], t);
	  expand_omp_build_assign (gsi, counts[fd->last_nonrect], t);
	}
      if (!rect_count_seen)
	t = counts[fd->last_nonrect];
      else
	t = fold_build2 (MULT_EXPR, type, fd->loop.n2,
			 counts[fd->last_nonrect]);
      expand_omp_build_assign (gsi, fd->loop.n2, t);
    }
  else if (fd->non_rect)
    {
      /* fd->loop.n2 is constant here; recover the non-rectangular
	 sub-nest count by dividing out the other loops' counts.  */
      tree t = fd->loop.n2;
      gcc_assert (TREE_CODE (t) == INTEGER_CST);
      int non_rect_referenced = 0, non_rect = 0;
      for (i = 0; i < fd->collapse; i++)
	{
	  if ((i < fd->first_nonrect || i > fd->last_nonrect)
	      && !integer_zerop (counts[i]))
	    t = fold_build2 (TRUNC_DIV_EXPR, type, t, counts[i]);
	  if (fd->loops[i].non_rect_referenced)
	    non_rect_referenced++;
	  if (fd->loops[i].m1 || fd->loops[i].m2)
	    non_rect++;
	}
      gcc_assert (non_rect == 1 && non_rect_referenced == 1);
      counts[fd->last_nonrect] = t;
    }
}

/* Helper function for expand_omp_{for_*,simd}.  Generate code like:
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
   if this loop doesn't have an inner loop construct combined with it.
If it does have an inner loop construct combined with it and the iteration count isn't known constant, store values from counts array into its _looptemp_ temporaries instead. For non-rectangular loops (between fd->first_nonrect and fd->last_nonrect inclusive), use the count of all those loops together, and either find quadratic etc. equation roots, or as a fallback, do: COUNT = 0; for (tmpi = N11; tmpi COND1 N12; tmpi += STEP1) for (tmpj = M21 * tmpi + N21; tmpj COND2 M22 * tmpi + N22; tmpj += STEP2) { int tmpk1 = M31 * tmpj + N31; int tmpk2 = M32 * tmpj + N32; if (tmpk1 COND3 tmpk2) { if (COND3 is <) adj = STEP3 - 1; else adj = STEP3 + 1; int temp = (adj + tmpk2 - tmpk1) / STEP3; if (COUNT + temp > T) { V1 = tmpi; V2 = tmpj; V3 = tmpk1 + (T - COUNT) * STEP3; goto done; } else COUNT += temp; } } done:; but for optional innermost or outermost rectangular loops that aren't referenced by other loop expressions keep doing the division/modulo. */ static void expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi, tree *counts, tree *nonrect_bounds, gimple *inner_stmt, tree startvar) { int i; if (gimple_omp_for_combined_p (fd->for_stmt)) { /* If fd->loop.n2 is constant, then no propagation of the counts is needed, they are constant. */ if (TREE_CODE (fd->loop.n2) == INTEGER_CST) return; tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR ? gimple_omp_taskreg_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); /* First two _looptemp_ clauses are for istart/iend, counts[0] isn't supposed to be handled, as the inner loop doesn't use it. 
*/ tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); int count = 0; if (fd->non_rect && fd->last_nonrect == fd->first_nonrect + 1 && !TYPE_UNSIGNED (TREE_TYPE (fd->loops[fd->last_nonrect].v))) count = 4; for (i = 0; i < fd->collapse + count; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); if (i) { tree tem = OMP_CLAUSE_DECL (innerc); tree t; if (i < fd->collapse) t = counts[i]; else switch (i - fd->collapse) { case 0: t = counts[0]; break; case 1: t = fd->first_inner_iterations; break; case 2: t = fd->factor; break; case 3: t = fd->adjn1; break; default: gcc_unreachable (); } t = fold_convert (TREE_TYPE (tem), t); t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); gassign *stmt = gimple_build_assign (tem, t); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); } } return; } tree type = TREE_TYPE (fd->loop.v); tree tem = create_tmp_reg (type, ".tem"); gassign *stmt = gimple_build_assign (tem, startvar); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); for (i = fd->collapse - 1; i >= 0; i--) { tree vtype = TREE_TYPE (fd->loops[i].v), itype, t; itype = vtype; if (POINTER_TYPE_P (vtype)) itype = signed_type_for (vtype); if (i != 0 && (i != fd->last_nonrect || fd->first_nonrect)) t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]); else t = tem; if (i == fd->last_nonrect) { t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); tree stopval = t; tree idx = create_tmp_reg (type, ".count"); expand_omp_build_assign (gsi, idx, build_zero_cst (type), true); basic_block bb_triang = NULL, bb_triang_dom = NULL; if (fd->first_nonrect + 1 == fd->last_nonrect && (TREE_CODE (fd->loop.n2) == INTEGER_CST || fd->first_inner_iterations) && (optab_handler (sqrt_optab, TYPE_MODE (double_type_node)) != CODE_FOR_nothing)) { tree outer_n1 = fd->adjn1 ? 
fd->adjn1 : fd->loops[i - 1].n1; tree itype = TREE_TYPE (fd->loops[i].v); tree first_inner_iterations = fd->first_inner_iterations; tree factor = fd->factor; gcond *cond_stmt = gimple_build_cond (NE_EXPR, factor, build_zero_cst (TREE_TYPE (factor)), NULL_TREE, NULL_TREE); gsi_insert_after (gsi, cond_stmt, GSI_CONTINUE_LINKING); edge e = split_block (gsi_bb (*gsi), cond_stmt); basic_block bb0 = e->src; e->flags = EDGE_TRUE_VALUE; e->probability = profile_probability::likely (); bb_triang_dom = bb0; *gsi = gsi_after_labels (e->dest); tree slltype = long_long_integer_type_node; tree ulltype = long_long_unsigned_type_node; tree stopvalull = fold_convert (ulltype, stopval); stopvalull = force_gimple_operand_gsi (gsi, stopvalull, true, NULL_TREE, false, GSI_CONTINUE_LINKING); first_inner_iterations = fold_convert (slltype, first_inner_iterations); first_inner_iterations = force_gimple_operand_gsi (gsi, first_inner_iterations, true, NULL_TREE, false, GSI_CONTINUE_LINKING); factor = fold_convert (slltype, factor); factor = force_gimple_operand_gsi (gsi, factor, true, NULL_TREE, false, GSI_CONTINUE_LINKING); tree first_inner_iterationsd = fold_build1 (FLOAT_EXPR, double_type_node, first_inner_iterations); first_inner_iterationsd = force_gimple_operand_gsi (gsi, first_inner_iterationsd, true, NULL_TREE, false, GSI_CONTINUE_LINKING); tree factord = fold_build1 (FLOAT_EXPR, double_type_node, factor); factord = force_gimple_operand_gsi (gsi, factord, true, NULL_TREE, false, GSI_CONTINUE_LINKING); tree stopvald = fold_build1 (FLOAT_EXPR, double_type_node, stopvalull); stopvald = force_gimple_operand_gsi (gsi, stopvald, true, NULL_TREE, false, GSI_CONTINUE_LINKING); /* Temporarily disable flag_rounding_math, values will be decimal numbers divided by 2 and worst case imprecisions due to too large values ought to be caught later by the checks for fallback. 
*/ int save_flag_rounding_math = flag_rounding_math; flag_rounding_math = 0; t = fold_build2 (RDIV_EXPR, double_type_node, factord, build_real (double_type_node, dconst2)); tree t3 = fold_build2 (MINUS_EXPR, double_type_node, first_inner_iterationsd, t); t3 = force_gimple_operand_gsi (gsi, t3, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = fold_build2 (MULT_EXPR, double_type_node, factord, build_real (double_type_node, dconst2)); t = fold_build2 (MULT_EXPR, double_type_node, t, stopvald); t = fold_build2 (PLUS_EXPR, double_type_node, t, fold_build2 (MULT_EXPR, double_type_node, t3, t3)); flag_rounding_math = save_flag_rounding_math; t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (flag_exceptions && cfun->can_throw_non_call_exceptions && operation_could_trap_p (LT_EXPR, true, false, NULL_TREE)) { tree tem = fold_build2 (LT_EXPR, boolean_type_node, t, build_zero_cst (double_type_node)); tem = force_gimple_operand_gsi (gsi, tem, true, NULL_TREE, false, GSI_CONTINUE_LINKING); cond_stmt = gimple_build_cond (NE_EXPR, tem, boolean_false_node, NULL_TREE, NULL_TREE); } else cond_stmt = gimple_build_cond (LT_EXPR, t, build_zero_cst (double_type_node), NULL_TREE, NULL_TREE); gsi_insert_after (gsi, cond_stmt, GSI_CONTINUE_LINKING); e = split_block (gsi_bb (*gsi), cond_stmt); basic_block bb1 = e->src; e->flags = EDGE_FALSE_VALUE; e->probability = profile_probability::very_likely (); *gsi = gsi_after_labels (e->dest); gcall *call = gimple_build_call_internal (IFN_SQRT, 1, t); tree sqrtr = create_tmp_var (double_type_node); gimple_call_set_lhs (call, sqrtr); gsi_insert_after (gsi, call, GSI_CONTINUE_LINKING); t = fold_build2 (MINUS_EXPR, double_type_node, sqrtr, t3); t = fold_build2 (RDIV_EXPR, double_type_node, t, factord); t = fold_build1 (FIX_TRUNC_EXPR, ulltype, t); tree c = create_tmp_var (ulltype); tree d = create_tmp_var (ulltype); expand_omp_build_assign (gsi, c, t, true); t = fold_build2 (MINUS_EXPR, ulltype, c, build_one_cst 
(ulltype)); t = fold_build2 (MULT_EXPR, ulltype, c, t); t = fold_build2 (RSHIFT_EXPR, ulltype, t, integer_one_node); t = fold_build2 (MULT_EXPR, ulltype, fold_convert (ulltype, fd->factor), t); tree t2 = fold_build2 (MULT_EXPR, ulltype, c, fold_convert (ulltype, fd->first_inner_iterations)); t = fold_build2 (PLUS_EXPR, ulltype, t, t2); expand_omp_build_assign (gsi, d, t, true); t = fold_build2 (MULT_EXPR, ulltype, fold_convert (ulltype, fd->factor), c); t = fold_build2 (PLUS_EXPR, ulltype, t, fold_convert (ulltype, fd->first_inner_iterations)); t2 = force_gimple_operand_gsi (gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); cond_stmt = gimple_build_cond (GE_EXPR, stopvalull, d, NULL_TREE, NULL_TREE); gsi_insert_after (gsi, cond_stmt, GSI_CONTINUE_LINKING); e = split_block (gsi_bb (*gsi), cond_stmt); basic_block bb2 = e->src; e->flags = EDGE_TRUE_VALUE; e->probability = profile_probability::very_likely (); *gsi = gsi_after_labels (e->dest); t = fold_build2 (PLUS_EXPR, ulltype, d, t2); t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); cond_stmt = gimple_build_cond (GE_EXPR, stopvalull, t, NULL_TREE, NULL_TREE); gsi_insert_after (gsi, cond_stmt, GSI_CONTINUE_LINKING); e = split_block (gsi_bb (*gsi), cond_stmt); basic_block bb3 = e->src; e->flags = EDGE_FALSE_VALUE; e->probability = profile_probability::very_likely (); *gsi = gsi_after_labels (e->dest); t = fold_convert (itype, c); t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i - 1].step); t = fold_build2 (PLUS_EXPR, itype, outer_n1, t); t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); expand_omp_build_assign (gsi, fd->loops[i - 1].v, t, true); t2 = fold_build2 (MINUS_EXPR, ulltype, stopvalull, d); t2 = fold_convert (itype, t2); t2 = fold_build2 (MULT_EXPR, itype, t2, fd->loops[i].step); t2 = fold_build2 (PLUS_EXPR, itype, t2, fd->loops[i].n1); if (fd->loops[i].m1) { t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].m1); t2 = fold_build2 
(PLUS_EXPR, itype, t2, t); } expand_omp_build_assign (gsi, fd->loops[i].v, t2, true); e = split_block (gsi_bb (*gsi), gsi_stmt (*gsi)); bb_triang = e->src; *gsi = gsi_after_labels (e->dest); remove_edge (e); e = make_edge (bb1, gsi_bb (*gsi), EDGE_TRUE_VALUE); e->probability = profile_probability::very_unlikely (); e = make_edge (bb2, gsi_bb (*gsi), EDGE_FALSE_VALUE); e->probability = profile_probability::very_unlikely (); e = make_edge (bb3, gsi_bb (*gsi), EDGE_TRUE_VALUE); e->probability = profile_probability::very_unlikely (); basic_block bb4 = create_empty_bb (bb0); add_bb_to_loop (bb4, bb0->loop_father); e = make_edge (bb0, bb4, EDGE_FALSE_VALUE); e->probability = profile_probability::unlikely (); make_edge (bb4, gsi_bb (*gsi), EDGE_FALLTHRU); set_immediate_dominator (CDI_DOMINATORS, bb4, bb0); set_immediate_dominator (CDI_DOMINATORS, gsi_bb (*gsi), bb0); gimple_stmt_iterator gsi2 = gsi_after_labels (bb4); t2 = fold_build2 (TRUNC_DIV_EXPR, type, counts[i], counts[i - 1]); t2 = force_gimple_operand_gsi (&gsi2, t2, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = fold_build2 (TRUNC_MOD_EXPR, type, stopval, t2); t2 = fold_build2 (TRUNC_DIV_EXPR, type, stopval, t2); t = fold_convert (itype, t); t2 = fold_convert (itype, t2); t = fold_build2 (MULT_EXPR, itype, t, fold_convert (itype, fd->loops[i].step)); t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t); t2 = fold_build2 (MULT_EXPR, itype, t2, fold_convert (itype, fd->loops[i - 1].step)); t2 = fold_build2 (PLUS_EXPR, itype, fd->loops[i - 1].n1, t2); t2 = force_gimple_operand_gsi (&gsi2, t2, false, NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (fd->loops[i - 1].v, t2); gsi_insert_after (&gsi2, stmt, GSI_CONTINUE_LINKING); if (fd->loops[i].m1) { t2 = fold_build2 (MULT_EXPR, itype, fd->loops[i].m1, fd->loops[i - 1].v); t = fold_build2 (PLUS_EXPR, itype, t, t2); } t = force_gimple_operand_gsi (&gsi2, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign 
(fd->loops[i].v, t); gsi_insert_after (&gsi2, stmt, GSI_CONTINUE_LINKING); } /* Fallback implementation. Evaluate the loops in between (inclusive) fd->first_nonrect and fd->last_nonrect at runtime unsing temporaries instead of the original iteration variables, in the body just bump the counter and compare with the desired value. */ gimple_stmt_iterator gsi2 = *gsi; basic_block entry_bb = gsi_bb (gsi2); edge e = split_block (entry_bb, gsi_stmt (gsi2)); e = split_block (e->dest, (gimple *) NULL); basic_block dom_bb = NULL; basic_block cur_bb = e->src; basic_block next_bb = e->dest; entry_bb = e->dest; *gsi = gsi_after_labels (entry_bb); tree *vs = XALLOCAVEC (tree, fd->last_nonrect); tree n1 = NULL_TREE, n2 = NULL_TREE; memset (vs, 0, fd->last_nonrect * sizeof (tree)); for (int j = fd->first_nonrect; j <= fd->last_nonrect; j++) { tree itype = TREE_TYPE (fd->loops[j].v); bool rect_p = (fd->loops[j].m1 == NULL_TREE && fd->loops[j].m2 == NULL_TREE && !fd->loops[j].non_rect_referenced); gsi2 = gsi_after_labels (cur_bb); t = fold_convert (itype, unshare_expr (fd->loops[j].n1)); if (fd->loops[j].m1) { n1 = fold_convert (itype, unshare_expr (fd->loops[j].m1)); n1 = fold_build2 (MULT_EXPR, itype, vs[j - fd->loops[j].outer], n1); n1 = fold_build2 (PLUS_EXPR, itype, n1, t); } else if (rect_p) n1 = build_zero_cst (type); else n1 = t; n1 = force_gimple_operand_gsi (&gsi2, n1, true, NULL_TREE, true, GSI_SAME_STMT); if (j < fd->last_nonrect) { vs[j] = create_tmp_reg (rect_p ? 
type : itype, ".it"); expand_omp_build_assign (&gsi2, vs[j], n1); } t = fold_convert (itype, unshare_expr (fd->loops[j].n2)); if (fd->loops[j].m2) { n2 = fold_convert (itype, unshare_expr (fd->loops[j].m2)); n2 = fold_build2 (MULT_EXPR, itype, vs[j - fd->loops[j].outer], n2); n2 = fold_build2 (PLUS_EXPR, itype, n2, t); } else if (rect_p) n2 = counts[j]; else n2 = t; n2 = force_gimple_operand_gsi (&gsi2, n2, true, NULL_TREE, true, GSI_SAME_STMT); if (j == fd->last_nonrect) { gcond *cond_stmt = gimple_build_cond (fd->loops[j].cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT); e = split_block (cur_bb, cond_stmt); e->flags = EDGE_TRUE_VALUE; edge ne = make_edge (cur_bb, next_bb, EDGE_FALSE_VALUE); e->probability = profile_probability::likely ().guessed (); ne->probability = e->probability.invert (); gsi2 = gsi_after_labels (e->dest); t = build_int_cst (itype, (fd->loops[j].cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, fd->loops[j].step), t); t = fold_build2 (PLUS_EXPR, itype, t, n2); t = fold_build2 (MINUS_EXPR, itype, t, n1); tree step = fold_convert (itype, fd->loops[j].step); if (TYPE_UNSIGNED (itype) && fd->loops[j].cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, step)); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step); t = fold_convert (type, t); t = fold_build2 (PLUS_EXPR, type, idx, t); t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE, true, GSI_SAME_STMT); e = make_edge (e->dest, next_bb, EDGE_FALLTHRU); set_immediate_dominator (CDI_DOMINATORS, next_bb, cur_bb); cond_stmt = gimple_build_cond (LE_EXPR, t, stopval, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT); e = split_block (gsi_bb (gsi2), cond_stmt); e->flags = EDGE_TRUE_VALUE; e->probability = profile_probability::likely ().guessed (); ne = make_edge (e->src, entry_bb, EDGE_FALSE_VALUE); 
ne->probability = e->probability.invert (); gsi2 = gsi_after_labels (e->dest); expand_omp_build_assign (&gsi2, idx, t); set_immediate_dominator (CDI_DOMINATORS, entry_bb, dom_bb); break; } e = split_block (cur_bb, last_stmt (cur_bb)); basic_block new_cur_bb = create_empty_bb (cur_bb); add_bb_to_loop (new_cur_bb, cur_bb->loop_father); gsi2 = gsi_after_labels (e->dest); if (rect_p) t = fold_build2 (PLUS_EXPR, type, vs[j], build_one_cst (type)); else { tree step = fold_convert (itype, unshare_expr (fd->loops[j].step)); t = fold_build2 (PLUS_EXPR, itype, vs[j], step); } t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE, true, GSI_SAME_STMT); expand_omp_build_assign (&gsi2, vs[j], t); edge ne = split_block (e->dest, last_stmt (e->dest)); gsi2 = gsi_after_labels (ne->dest); gcond *cond_stmt; if (next_bb == entry_bb) /* No need to actually check the outermost condition. */ cond_stmt = gimple_build_cond (EQ_EXPR, boolean_true_node, boolean_true_node, NULL_TREE, NULL_TREE); else cond_stmt = gimple_build_cond (rect_p ? 
LT_EXPR : fd->loops[j].cond_code, vs[j], n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT); edge e3, e4; if (next_bb == entry_bb) { e3 = find_edge (ne->dest, next_bb); e3->flags = EDGE_FALSE_VALUE; dom_bb = ne->dest; } else e3 = make_edge (ne->dest, next_bb, EDGE_FALSE_VALUE); e4 = make_edge (ne->dest, new_cur_bb, EDGE_TRUE_VALUE); e4->probability = profile_probability::likely ().guessed (); e3->probability = e4->probability.invert (); basic_block esrc = e->src; make_edge (e->src, ne->dest, EDGE_FALLTHRU); cur_bb = new_cur_bb; basic_block latch_bb = next_bb; next_bb = e->dest; remove_edge (e); set_immediate_dominator (CDI_DOMINATORS, ne->dest, esrc); set_immediate_dominator (CDI_DOMINATORS, latch_bb, ne->dest); set_immediate_dominator (CDI_DOMINATORS, cur_bb, ne->dest); } for (int j = fd->last_nonrect; j >= fd->first_nonrect; j--) { tree itype = TREE_TYPE (fd->loops[j].v); bool rect_p = (fd->loops[j].m1 == NULL_TREE && fd->loops[j].m2 == NULL_TREE && !fd->loops[j].non_rect_referenced); if (j == fd->last_nonrect) { t = fold_build2 (MINUS_EXPR, type, stopval, idx); t = fold_convert (itype, t); tree t2 = fold_convert (itype, unshare_expr (fd->loops[j].step)); t = fold_build2 (MULT_EXPR, itype, t, t2); t = fold_build2 (PLUS_EXPR, itype, n1, t); } else if (rect_p) { t = fold_convert (itype, vs[j]); t = fold_build2 (MULT_EXPR, itype, t, fold_convert (itype, fd->loops[j].step)); if (POINTER_TYPE_P (vtype)) t = fold_build_pointer_plus (fd->loops[j].n1, t); else t = fold_build2 (PLUS_EXPR, itype, fd->loops[j].n1, t); } else t = vs[j]; t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE, true, GSI_SAME_STMT); stmt = gimple_build_assign (fd->loops[j].v, t); gsi_insert_before (gsi, stmt, GSI_SAME_STMT); } if (gsi_end_p (*gsi)) *gsi = gsi_last_bb (gsi_bb (*gsi)); else gsi_prev (gsi); if (bb_triang) { e = split_block (gsi_bb (*gsi), gsi_stmt (*gsi)); make_edge (bb_triang, e->dest, EDGE_FALLTHRU); *gsi = gsi_after_labels (e->dest); if (!gsi_end_p 
(*gsi)) gsi_insert_before (gsi, gimple_build_nop (), GSI_NEW_STMT); set_immediate_dominator (CDI_DOMINATORS, e->dest, bb_triang_dom); } } else { t = fold_convert (itype, t); t = fold_build2 (MULT_EXPR, itype, t, fold_convert (itype, fd->loops[i].step)); if (POINTER_TYPE_P (vtype)) t = fold_build_pointer_plus (fd->loops[i].n1, t); else t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t); t = force_gimple_operand_gsi (gsi, t, DECL_P (fd->loops[i].v) && TREE_ADDRESSABLE (fd->loops[i].v), NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (fd->loops[i].v, t); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); } if (i != 0 && (i != fd->last_nonrect || fd->first_nonrect)) { t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]); t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (tem, t); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); } if (i == fd->last_nonrect) i = fd->first_nonrect; } if (fd->non_rect) for (i = 0; i <= fd->last_nonrect; i++) if (fd->loops[i].m2) { tree itype = TREE_TYPE (fd->loops[i].v); tree t = fold_convert (itype, unshare_expr (fd->loops[i].m2)); t = fold_build2 (MULT_EXPR, itype, fd->loops[i - fd->loops[i].outer].v, t); t = fold_build2 (PLUS_EXPR, itype, t, fold_convert (itype, unshare_expr (fd->loops[i].n2))); nonrect_bounds[i] = create_tmp_reg (itype, ".bound"); t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (nonrect_bounds[i], t); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); } } /* Helper function for expand_omp_for_*. Generate code like: L10: V3 += STEP3; if (V3 cond3 N32) goto BODY_BB; else goto L11; L11: V3 = N31; V2 += STEP2; if (V2 cond2 N22) goto BODY_BB; else goto L12; L12: V2 = N21; V1 += STEP1; goto BODY_BB; For non-rectangular loops, use temporaries stored in nonrect_bounds for the upper bounds if M?2 multiplier is present. Given e.g. 
   for (V1 = N11; V1 cond1 N12; V1 += STEP1)
     for (V2 = N21; V2 cond2 N22; V2 += STEP2)
       for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	 for (V4 = N41 + M41 * V2; V4 cond4 N42 + M42 * V2; V4 += STEP4)
   do:
    L10:
	V4 += STEP4;
	if (V4 cond4 NONRECT_BOUND4) goto BODY_BB; else goto L11;
    L11:
	V4 = N41 + M41 * V2; // This can be left out if the loop
			     // refers to the immediate parent loop
	V3 += STEP3;
	if (V3 cond3 N32) goto BODY_BB; else goto L12;
    L12:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L120; else goto L13;
    L120:
	V4 = N41 + M41 * V2;
	NONRECT_BOUND4 = N42 + M42 * V2;
	if (V4 cond4 NONRECT_BOUND4) goto BODY_BB; else goto L12;
    L13:
	V2 = N21;
	V1 += STEP1;
	goto L120;  */

static basic_block
extract_omp_for_update_vars (struct omp_for_data *fd, tree *nonrect_bounds,
			     basic_block cont_bb, basic_block body_bb)
{
  basic_block last_bb, bb, collapse_bb = NULL;
  int i;
  gimple_stmt_iterator gsi;
  edge e;
  tree t;
  gimple *stmt;

  /* Walk the collapsed loops from innermost (fd->collapse - 1) to
     outermost, building one increment/test block per loop.  */
  last_bb = cont_bb;
  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v);

      bb = create_empty_bb (last_bb);
      add_bb_to_loop (bb, last_bb->loop_father);
      gsi = gsi_start_bb (bb);

      if (i < fd->collapse - 1)
	{
	  /* This block is reached when the next-inner loop's condition
	     failed; reset that inner iterator to its initial value,
	     re-evaluating a non-rectangular lower bound (m1 * outer + n1)
	     when needed.  Loops whose bound refers to the immediate parent
	     (outer == 1 with m1 set) are reset elsewhere.  */
	  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
	  e->probability
	    = profile_probability::guessed_always ().apply_scale (1, 8);

	  struct omp_for_data_loop *l = &fd->loops[i + 1];
	  if (l->m1 == NULL_TREE || l->outer != 1)
	    {
	      t = l->n1;
	      if (l->m1)
		{
		  tree t2 = fold_build2 (MULT_EXPR, TREE_TYPE (t),
					 fd->loops[i + 1 - l->outer].v,
					 l->m1);
		  t = fold_build2 (PLUS_EXPR, TREE_TYPE (t), t2, t);
		}
	      t = force_gimple_operand_gsi (&gsi, t,
					    DECL_P (l->v)
					    && TREE_ADDRESSABLE (l->v),
					    NULL_TREE, false,
					    GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (l->v, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	    }
	}
      else
	collapse_bb = bb;

      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

      /* Bump this loop's iterator by its step.  */
      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
      else
	t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
      t = force_gimple_operand_gsi (&gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false, GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      if (fd->loops[i].non_rect_referenced)
	{
	  /* Some inner loop's bounds depend on this iterator: after the
	     bump, re-evaluate each dependent inner iterator (and its
	     nonrect_bounds[] upper bound when m2 is present) and test
	     whether that inner loop can be entered at all; chain the
	     tests, falling back to this loop's own update on failure.  */
	  basic_block update_bb = NULL, prev_bb = NULL;
	  for (int j = i + 1; j <= fd->last_nonrect; j++)
	    if (j - fd->loops[j].outer == i)
	      {
		tree n1, n2;
		struct omp_for_data_loop *l = &fd->loops[j];
		basic_block this_bb = create_empty_bb (last_bb);
		add_bb_to_loop (this_bb, last_bb->loop_father);
		gimple_stmt_iterator gsi2 = gsi_start_bb (this_bb);
		if (prev_bb)
		  {
		    e = make_edge (prev_bb, this_bb, EDGE_TRUE_VALUE);
		    e->probability
		      = profile_probability::guessed_always ().apply_scale (7,
									    8);
		    set_immediate_dominator (CDI_DOMINATORS, this_bb,
					     prev_bb);
		  }
		if (l->m1)
		  {
		    /* n1 = m1 * outer_v + l->n1, stored into l->v.  */
		    t = fold_build2 (MULT_EXPR, TREE_TYPE (l->m1), l->m1,
				     fd->loops[i].v);
		    t = fold_build2 (PLUS_EXPR, TREE_TYPE (l->v), t, l->n1);
		    n1 = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
						   false,
						   GSI_CONTINUE_LINKING);
		    stmt = gimple_build_assign (l->v, n1);
		    gsi_insert_after (&gsi2, stmt, GSI_CONTINUE_LINKING);
		    n1 = l->v;
		  }
		else
		  n1 = force_gimple_operand_gsi (&gsi2, l->n1, true,
						 NULL_TREE, false,
						 GSI_CONTINUE_LINKING);
		if (l->m2)
		  {
		    /* n2 = m2 * outer_v + l->n2, cached in
		       nonrect_bounds[j] for reuse by later tests.  */
		    t = fold_build2 (MULT_EXPR, TREE_TYPE (l->m2), l->m2,
				     fd->loops[i].v);
		    t = fold_build2 (PLUS_EXPR,
				     TREE_TYPE (nonrect_bounds[j]),
				     t, unshare_expr (l->n2));
		    n2 = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
						   false,
						   GSI_CONTINUE_LINKING);
		    stmt = gimple_build_assign (nonrect_bounds[j], n2);
		    gsi_insert_after (&gsi2, stmt, GSI_CONTINUE_LINKING);
		    n2 = nonrect_bounds[j];
		  }
		else
		  n2 = force_gimple_operand_gsi (&gsi2, unshare_expr (l->n2),
						 true, NULL_TREE, false,
						 GSI_CONTINUE_LINKING);
		gcond *cond_stmt
		  = gimple_build_cond (l->cond_code, n1, n2,
				       NULL_TREE, NULL_TREE);
		gsi_insert_after (&gsi2, cond_stmt, GSI_CONTINUE_LINKING);
		if (update_bb == NULL)
		  update_bb = this_bb;
		e = make_edge (this_bb, bb, EDGE_FALSE_VALUE);
		e->probability
		  = profile_probability::guessed_always ().apply_scale (1, 8);
		if (prev_bb == NULL)
		  set_immediate_dominator (CDI_DOMINATORS, this_bb, bb);
		prev_bb = this_bb;
	      }
	  e = make_edge (prev_bb, body_bb, EDGE_TRUE_VALUE);
	  e->probability
	    = profile_probability::guessed_always ().apply_scale (7, 8);
	  body_bb = update_bb;
	}

      if (i > 0)
	{
	  /* Non-outermost loop: emit its condition test and branch back
	     to the body while it holds.  Use the cached nonrect bound
	     when the upper bound depends on an outer iterator.  */
	  if (fd->loops[i].m2)
	    t = nonrect_bounds[i];
	  else
	    t = unshare_expr (fd->loops[i].n2);
	  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  tree v = fd->loops[i].v;
	  if (DECL_P (v) && TREE_ADDRESSABLE (v))
	    v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
					  false, GSI_CONTINUE_LINKING);
	  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
	  stmt = gimple_build_cond_empty (t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  if (walk_tree (gimple_cond_lhs_ptr (as_a <gcond *> (stmt)),
			 expand_omp_regimplify_p, NULL, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (as_a <gcond *> (stmt)),
			    expand_omp_regimplify_p, NULL, NULL))
	    gimple_regimplify_operands (stmt, &gsi);
	  e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
	  e->probability
	    = profile_probability::guessed_always ().apply_scale (7, 8);
	}
      else
	/* The outermost loop's update falls through unconditionally.  */
	make_edge (bb, body_bb, EDGE_FALLTHRU);
      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
      last_bb = bb;
    }

  return collapse_bb;
}

/* Expand #pragma omp ordered depend(source).  */

static void
expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
			   tree *counts, location_t loc)
{
  /* Emit GOMP_doacross_post (&counts[fd->ordered]) (or the _ull_ variant,
     chosen by fd->iter_type) before *GSI.  */
  enum built_in_function source_ix
    = fd->iter_type == long_integer_type_node
      ? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST;
  gimple *g
    = gimple_build_call (builtin_decl_explicit (source_ix), 1,
			 build_fold_addr_expr (counts[fd->ordered]));
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
}

/* Expand a single depend from #pragma omp ordered depend(sink:...).
   */

static void
expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
			 tree *counts, tree c, location_t loc)
{
  auto_vec<tree, 10> args;
  enum built_in_function sink_ix
    = fd->iter_type == long_integer_type_node
      ? BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT;
  tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE;
  int i;
  gimple_stmt_iterator gsi2 = *gsi;
  bool warned_step = false;

  /* First pass over the offsets: diagnose a sink vector whose first
     non-zero offset waits on a lexically later iteration.  Only that
     first non-zero offset determines the direction.  */
  for (i = 0; i < fd->ordered; i++)
    {
      tree step = NULL_TREE;
      off = TREE_PURPOSE (deps);
      /* An offset wrapped in TRUNC_DIV_EXPR carries an explicit step
	 (non-simple Fortran DO loops).  */
      if (TREE_CODE (off) == TRUNC_DIV_EXPR)
	{
	  step = TREE_OPERAND (off, 1);
	  off = TREE_OPERAND (off, 0);
	}
      if (!integer_zerop (off))
	{
	  gcc_assert (fd->loops[i].cond_code == LT_EXPR
		      || fd->loops[i].cond_code == GT_EXPR);
	  bool forward = fd->loops[i].cond_code == LT_EXPR;
	  if (step)
	    {
	      /* Non-simple Fortran DO loops.  If step is variable,
		 we don't know at compile even the direction, so can't
		 warn.  */
	      if (TREE_CODE (step) != INTEGER_CST)
		break;
	      forward = tree_int_cst_sgn (step) != -1;
	    }
	  if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    warning_at (loc, 0, "%<depend%> clause with %<sink%> modifier "
			"waiting for lexically later iteration");
	  break;
	}
      deps = TREE_CHAIN (deps);
    }
  /* If all offsets corresponding to the collapsed loops are zero,
     this depend clause can be ignored.  FIXME: but there is still a
     flush needed.  We need to emit one __sync_synchronize () for it
     though (perhaps conditionally)?  Solve this together with the
     conservative dependence folding optimization.
  if (i >= fd->collapse)
    return;  */

  deps = OMP_CLAUSE_DECL (c);
  gsi_prev (&gsi2);
  edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2));
  edge e2 = split_block_after_labels (e1->dest);

  gsi2 = gsi_after_labels (e1->dest);
  *gsi = gsi_last_bb (e1->src);
  /* Second pass: for each ordered loop, fold its sink offset into the
     flattened iteration number pushed onto ARGS for the wait call, and
     accumulate into COND the runtime condition that the waited-for
     iteration actually lies inside the iteration space.  */
  for (i = 0; i < fd->ordered; i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);
      tree step = NULL_TREE;
      tree orig_off = NULL_TREE;
      if (POINTER_TYPE_P (itype))
	itype = sizetype;
      if (i)
	deps = TREE_CHAIN (deps);
      off = TREE_PURPOSE (deps);
      if (TREE_CODE (off) == TRUNC_DIV_EXPR)
	{
	  step = TREE_OPERAND (off, 1);
	  off = TREE_OPERAND (off, 0);
	  gcc_assert (fd->loops[i].cond_code == LT_EXPR
		      && integer_onep (fd->loops[i].step)
		      && !POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)));
	}
      tree s = fold_convert_loc (loc, itype, step ? step : fd->loops[i].step);
      if (step)
	{
	  off = fold_convert_loc (loc, itype, off);
	  orig_off = off;
	  off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
	}

      if (integer_zerop (off))
	t = boolean_true_node;
      else
	{
	  /* A = V +/- OFF is the iteration being waited for; build the
	     check that A is still within this loop's bounds.  */
	  tree a;
	  tree co = fold_convert_loc (loc, itype, off);
	  if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
	    {
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		co = fold_build1_loc (loc, NEGATE_EXPR, itype, co);
	      a = fold_build2_loc (loc, POINTER_PLUS_EXPR,
				   TREE_TYPE (fd->loops[i].v),
				   fd->loops[i].v, co);
	    }
	  else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
				 fd->loops[i].v, co);
	  else
	    a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
				 fd->loops[i].v, co);
	  if (step)
	    {
	      /* With a variable step the direction is only known at
		 runtime; select between the two bound checks on the
		 sign of STEP.  */
	      tree t1, t2;
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t1 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
				      fd->loops[i].n1);
	      else
		t1 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				      fd->loops[i].n2);
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t2 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				      fd->loops[i].n2);
	      else
		t2 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
				      fd->loops[i].n1);
	      t = fold_build2_loc (loc, LT_EXPR, boolean_type_node,
				   step, build_int_cst (TREE_TYPE (step), 0));
	      if (TREE_CODE (step) != INTEGER_CST)
		{
		  t1 = unshare_expr (t1);
		  t1 = force_gimple_operand_gsi (gsi, t1, true, NULL_TREE,
						 false, GSI_CONTINUE_LINKING);
		  t2 = unshare_expr (t2);
		  t2 = force_gimple_operand_gsi (gsi, t2, true, NULL_TREE,
						 false, GSI_CONTINUE_LINKING);
		}
	      t = fold_build3_loc (loc, COND_EXPR, boolean_type_node,
				   t, t2, t1);
	    }
	  else if (fd->loops[i].cond_code == LT_EXPR)
	    {
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
				     fd->loops[i].n1);
	      else
		t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				     fd->loops[i].n2);
	    }
	  else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    t = fold_build2_loc (loc, GT_EXPR, boolean_type_node, a,
				 fd->loops[i].n2);
	  else
	    t = fold_build2_loc (loc, LE_EXPR, boolean_type_node, a,
				 fd->loops[i].n1);
	}
      if (cond)
	cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t);
      else
	cond = t;

      off = fold_convert_loc (loc, itype, off);

      /* If the offset is not a multiple of the step, the sink refers to
	 an iteration that never occurs; warn and guard with COND.  */
      if (step
	  || (fd->loops[i].cond_code == LT_EXPR
	      ? !integer_onep (fd->loops[i].step)
	      : !integer_minus_onep (fd->loops[i].step)))
	{
	  if (step == NULL_TREE
	      && TYPE_UNSIGNED (itype)
	      && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off,
				 fold_build1_loc (loc, NEGATE_EXPR, itype,
						  s));
	  else
	    t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype,
				 orig_off ? orig_off : off, s);
	  t = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, t,
			       build_int_cst (itype, 0));
	  if (integer_zerop (t) && !warned_step)
	    {
	      warning_at (loc, 0, "%<depend%> clause with %<sink%> modifier "
			  "refers to iteration never in the iteration "
			  "space");
	      warned_step = true;
	    }
	  cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node,
				  cond, t);
	}

      /* T is the current flattened iteration number for this dimension.  */
      if (i <= fd->collapse - 1 && fd->collapse > 1)
	t = fd->loop.v;
      else if (counts[i])
	t = counts[i];
      else
	{
	  t = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
			       fd->loops[i].v, fd->loops[i].n1);
	  t = fold_convert_loc (loc, fd->iter_type, t);
	}
      if (step)
	/* We have divided off by step already earlier.  */;
      else if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off,
			       fold_build1_loc (loc, NEGATE_EXPR, itype,
						s));
      else
	off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	off = fold_build1_loc (loc, NEGATE_EXPR, itype, off);
      off = fold_convert_loc (loc, fd->iter_type, off);
      if (i <= fd->collapse - 1 && fd->collapse > 1)
	{
	  /* Collapsed dimensions are folded into a single argument;
	     COFF accumulates the scaled partial offset.  */
	  if (i)
	    off = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, coff,
				   off);
	  if (i < fd->collapse - 1)
	    {
	      coff = fold_build2_loc (loc, MULT_EXPR, fd->iter_type, off,
				      counts[i]);
	      continue;
	    }
	}
      off = unshare_expr (off);
      t = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, t, off);
      t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      args.safe_push (t);
    }
  /* Emit the GOMP_doacross_wait call, executed only when COND holds
     (i.e. the waited-for iteration exists).  */
  gimple *g = gimple_build_call_vec (builtin_decl_explicit (sink_ix), args);
  gimple_set_location (g, loc);
  gsi_insert_before (&gsi2, g, GSI_SAME_STMT);

  cond = unshare_expr (cond);
  cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE, false,
				   GSI_CONTINUE_LINKING);
  gsi_insert_after (gsi, gimple_build_cond_empty (cond), GSI_NEW_STMT);
  edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
  e3->probability = profile_probability::guessed_always ().apply_scale (1, 8);
  e1->probability = e3->probability.invert ();
  e1->flags = EDGE_TRUE_VALUE;
  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);

  *gsi = gsi_after_labels (e2->dest);
}

/* Expand all #pragma omp ordered depend(source) and
   #pragma omp ordered depend(sink:...) constructs in the current
   #pragma omp for ordered(n) region.  */

static void
expand_omp_ordered_source_sink (struct omp_region *region,
				struct omp_for_data *fd, tree *counts,
				basic_block cont_bb)
{
  struct omp_region *inner;
  int i;
  /* Decide how each ordered dimension's iteration number is tracked:
     NULL when it can be recomputed from the iterator (or is covered by
     the collapsed fd->loop.v), a constant zero when there is no continue
     edge, otherwise a fresh .orditer counter variable.  */
  for (i = fd->collapse - 1; i < fd->ordered; i++)
    if (i == fd->collapse - 1 && fd->collapse > 1)
      counts[i] = NULL_TREE;
    else if (i >= fd->collapse && !cont_bb)
      counts[i] = build_zero_cst (fd->iter_type);
    else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))
	     && integer_onep (fd->loops[i].step))
      counts[i] = NULL_TREE;
    else
      counts[i] = create_tmp_var (fd->iter_type, ".orditer");
  /* counts[fd->ordered] is the array whose address is passed to the
     GOMP_doacross_{post,wait} calls.  */
  tree atype
    = build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1);
  counts[fd->ordered] = create_tmp_var (atype, ".orditera");
  TREE_ADDRESSABLE (counts[fd->ordered]) = 1;

  for (inner = region->inner; inner; inner = inner->next)
    if (inner->type == GIMPLE_OMP_ORDERED)
      {
	gomp_ordered *ord_stmt = inner->ord_stmt;
	gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt);
	location_t loc = gimple_location (ord_stmt);
	tree c;
	for (c = gimple_omp_ordered_clauses (ord_stmt);
	     c; c = OMP_CLAUSE_CHAIN (c))
	  if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
	    break;
	if (c)
	  expand_omp_ordered_source (&gsi, fd, counts, loc);
	for (c = gimple_omp_ordered_clauses (ord_stmt);
	     c; c = OMP_CLAUSE_CHAIN (c))
	  if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
	    expand_omp_ordered_sink (&gsi, fd, counts, c, loc);
	gsi_remove (&gsi, true);
      }
}

/* Wrap the body into fd->ordered - fd->collapse loops that aren't
   collapsed.
   */

static basic_block
expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts,
			      basic_block cont_bb, basic_block body_bb,
			      bool ordered_lastprivate)
{
  /* Nothing to wrap when every ordered loop is also collapsed.  */
  if (fd->ordered == fd->collapse)
    return cont_bb;

  if (!cont_bb)
    {
      /* Broken loop body (no continue edge): just initialize the
	 iterators and zero the doacross count array entries once.  */
      gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
      for (int i = fd->collapse; i < fd->ordered; i++)
	{
	  tree type = TREE_TYPE (fd->loops[i].v);
	  tree n1 = fold_convert (type, fd->loops[i].n1);
	  expand_omp_build_assign (&gsi, fd->loops[i].v, n1);
	  tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			      size_int (i - fd->collapse + 1),
			      NULL_TREE, NULL_TREE);
	  expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
	}
      return NULL;
    }

  /* Wrap BODY_BB in one loop per non-collapsed ordered dimension,
     innermost first: initialize the iterator (and its .orditer counter
     plus .orditera array slot), then build the increment in the latch
     and the condition in a new header block.  */
  for (int i = fd->ordered - 1; i >= fd->collapse; i--)
    {
      tree t, type = TREE_TYPE (fd->loops[i].v);
      gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
      expand_omp_build_assign (&gsi, fd->loops[i].v,
			       fold_convert (type, fd->loops[i].n1));
      if (counts[i])
	expand_omp_build_assign (&gsi, counts[i],
				 build_zero_cst (fd->iter_type));
      tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			  size_int (i - fd->collapse + 1),
			  NULL_TREE, NULL_TREE);
      expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
      if (!gsi_end_p (gsi))
	gsi_prev (&gsi);
      else
	gsi = gsi_last_bb (body_bb);
      edge e1 = split_block (body_bb, gsi_stmt (gsi));
      basic_block new_body = e1->dest;
      if (body_bb == cont_bb)
	cont_bb = new_body;
      edge e2 = NULL;
      basic_block new_header;
      if (EDGE_COUNT (cont_bb->preds) > 0)
	{
	  /* Latch: bump the iterator, update the iteration count and
	     store it into the .orditera slot.  */
	  gsi = gsi_last_bb (cont_bb);
	  if (POINTER_TYPE_P (type))
	    t = fold_build_pointer_plus (fd->loops[i].v,
					 fold_convert (sizetype,
						       fd->loops[i].step));
	  else
	    t = fold_build2 (PLUS_EXPR, type, fd->loops[i].v,
			     fold_convert (type, fd->loops[i].step));
	  expand_omp_build_assign (&gsi, fd->loops[i].v, t);
	  if (counts[i])
	    {
	      t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[i],
			       build_int_cst (fd->iter_type, 1));
	      expand_omp_build_assign (&gsi, counts[i], t);
	      t = counts[i];
	    }
	  else
	    {
	      t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
			       fd->loops[i].v, fd->loops[i].n1);
	      t = fold_convert (fd->iter_type, t);
	      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					    true, GSI_SAME_STMT);
	    }
	  aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			 size_int (i - fd->collapse + 1),
			 NULL_TREE, NULL_TREE);
	  expand_omp_build_assign (&gsi, aref, t);
	  gsi_prev (&gsi);
	  e2 = split_block (cont_bb, gsi_stmt (gsi));
	  new_header = e2->dest;
	}
      else
	new_header = cont_bb;
      /* Header: test the loop condition, looping back to the body while
	 it holds.  */
      gsi = gsi_after_labels (new_header);
      tree v = force_gimple_operand_gsi (&gsi, fd->loops[i].v, true,
					 NULL_TREE, true, GSI_SAME_STMT);
      tree n2
	= force_gimple_operand_gsi (&gsi, fold_convert (type,
							fd->loops[i].n2),
				    true, NULL_TREE, true, GSI_SAME_STMT);
      t = build2 (fd->loops[i].cond_code, boolean_type_node, v, n2);
      gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_NEW_STMT);
      edge e3 = split_block (new_header, gsi_stmt (gsi));
      cont_bb = e3->dest;
      remove_edge (e1);
      make_edge (body_bb, new_header, EDGE_FALLTHRU);
      e3->flags = EDGE_FALSE_VALUE;
      e3->probability
	= profile_probability::guessed_always ().apply_scale (1, 8);
      e1 = make_edge (new_header, new_body, EDGE_TRUE_VALUE);
      e1->probability = e3->probability.invert ();

      set_immediate_dominator (CDI_DOMINATORS, new_header, body_bb);
      set_immediate_dominator (CDI_DOMINATORS, new_body, new_header);

      if (e2)
	{
	  /* Register the new natural loop with the loop tree.  */
	  class loop *loop = alloc_loop ();
	  loop->header = new_header;
	  loop->latch = e2->src;
	  add_loop (loop, body_bb->loop_father);
	}
    }

  /* If there are any lastprivate clauses and it is possible some loops
     might have zero iterations, ensure all the decls are initialized,
     otherwise we could crash evaluating C++ class iterators with
     lastprivate clauses.  */
  bool need_inits = false;
  for (int i = fd->collapse; ordered_lastprivate && i < fd->ordered; i++)
    if (need_inits)
      {
	tree type = TREE_TYPE (fd->loops[i].v);
	gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
	expand_omp_build_assign (&gsi, fd->loops[i].v,
				 fold_convert (type, fd->loops[i].n1));
      }
    else
      {
	/* Once a loop's entry test is not known to hold at compile time,
	   everything nested inside it may be left uninitialized.  */
	tree type = TREE_TYPE (fd->loops[i].v);
	tree this_cond = fold_build2 (fd->loops[i].cond_code,
				      boolean_type_node,
				      fold_convert (type, fd->loops[i].n1),
				      fold_convert (type, fd->loops[i].n2));
	if (!integer_onep (this_cond))
	  need_inits = true;
      }

  return cont_bb;
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.
    If this is gimple_omp_for_combined_p loop, then instead of assigning
    V and iend in L0 we assign the first two _looptemp_ clause decls of the
    inner GIMPLE_OMP_FOR and V += STEP; and
    if (V cond iend) goto L1; else goto L2; are removed.
For collapsed loops, given parameters: collapse(3) for (V1 = N11; V1 cond1 N12; V1 += STEP1) for (V2 = N21; V2 cond2 N22; V2 += STEP2) for (V3 = N31; V3 cond3 N32; V3 += STEP3) BODY; we generate pseudocode if (__builtin_expect (N32 cond3 N31, 0)) goto Z0; if (cond3 is <) adj = STEP3 - 1; else adj = STEP3 + 1; count3 = (adj + N32 - N31) / STEP3; if (__builtin_expect (N22 cond2 N21, 0)) goto Z0; if (cond2 is <) adj = STEP2 - 1; else adj = STEP2 + 1; count2 = (adj + N22 - N21) / STEP2; if (__builtin_expect (N12 cond1 N11, 0)) goto Z0; if (cond1 is <) adj = STEP1 - 1; else adj = STEP1 + 1; count1 = (adj + N12 - N11) / STEP1; count = count1 * count2 * count3; goto Z1; Z0: count = 0; Z1: more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0); if (more) goto L0; else goto L3; L0: V = istart0; T = V; V3 = N31 + (T % count3) * STEP3; T = T / count3; V2 = N21 + (T % count2) * STEP2; T = T / count2; V1 = N11 + T * STEP1; iend = iend0; L1: BODY; V += 1; if (V < iend) goto L10; else goto L2; L10: V3 += STEP3; if (V3 cond3 N32) goto L1; else goto L11; L11: V3 = N31; V2 += STEP2; if (V2 cond2 N22) goto L1; else goto L12; L12: V2 = N21; V1 += STEP1; goto L1; L2: if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3; L3: */ static void expand_omp_for_generic (struct omp_region *region, struct omp_for_data *fd, enum built_in_function start_fn, enum built_in_function next_fn, tree sched_arg, gimple *inner_stmt) { tree type, istart0, iend0, iend; tree t, vmain, vback, bias = NULL_TREE; basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb; basic_block l2_bb = NULL, l3_bb = NULL; gimple_stmt_iterator gsi; gassign *assign_stmt; bool in_combined_parallel = is_combined_parallel (region); bool broken_loop = region->cont == NULL; edge e, ne; tree *counts = NULL; int i; bool ordered_lastprivate = false; gcc_assert (!broken_loop || !in_combined_parallel); gcc_assert (fd->iter_type == long_integer_type_node || !in_combined_parallel); entry_bb = region->entry; 
cont_bb = region->cont; collapse_bb = NULL; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); gcc_assert (broken_loop || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest); l0_bb = split_edge (FALLTHRU_EDGE (entry_bb)); l1_bb = single_succ (l0_bb); if (!broken_loop) { l2_bb = create_empty_bb (cont_bb); gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb || (single_succ_edge (BRANCH_EDGE (cont_bb)->dest)->dest == l1_bb)); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); } else l2_bb = NULL; l3_bb = BRANCH_EDGE (entry_bb)->dest; exit_bb = region->exit; gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->ordered && omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE_LASTPRIVATE)) ordered_lastprivate = false; tree reductions = NULL_TREE; tree mem = NULL_TREE, cond_var = NULL_TREE, condtemp = NULL_TREE; tree memv = NULL_TREE; if (fd->lastprivate_conditional) { tree c = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__CONDTEMP_); if (fd->have_pointer_condtemp) condtemp = OMP_CLAUSE_DECL (c); c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE__CONDTEMP_); cond_var = OMP_CLAUSE_DECL (c); } if (sched_arg) { if (fd->have_reductemp) { tree c = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__REDUCTEMP_); reductions = OMP_CLAUSE_DECL (c); gcc_assert (TREE_CODE (reductions) == SSA_NAME); gimple *g = SSA_NAME_DEF_STMT (reductions); reductions = gimple_assign_rhs1 (g); OMP_CLAUSE_DECL (c) = reductions; entry_bb = gimple_bb (g); edge e = split_block (entry_bb, g); if (region->entry == entry_bb) region->entry = e->dest; gsi = gsi_last_bb (entry_bb); } else reductions = null_pointer_node; if (fd->have_pointer_condtemp) { tree type = TREE_TYPE (condtemp); memv = create_tmp_var (type); TREE_ADDRESSABLE (memv) = 1; unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))); sz *= fd->lastprivate_conditional; expand_omp_build_assign (&gsi, memv, 
build_int_cst (type, sz), false); mem = build_fold_addr_expr (memv); } else mem = null_pointer_node; } if (fd->collapse > 1 || fd->ordered) { int first_zero_iter1 = -1, first_zero_iter2 = -1; basic_block zero_iter1_bb = NULL, zero_iter2_bb = NULL, l2_dom_bb = NULL; counts = XALLOCAVEC (tree, fd->ordered ? fd->ordered + 1 : fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, zero_iter1_bb, first_zero_iter1, zero_iter2_bb, first_zero_iter2, l2_dom_bb); if (zero_iter1_bb) { /* Some counts[i] vars might be uninitialized if some loop has zero iterations. But the body shouldn't be executed in that case, so just avoid uninit warnings. */ for (i = first_zero_iter1; i < (fd->ordered ? fd->ordered : fd->collapse); i++) if (SSA_VAR_P (counts[i])) TREE_NO_WARNING (counts[i]) = 1; gsi_prev (&gsi); e = split_block (entry_bb, gsi_stmt (gsi)); entry_bb = e->dest; make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU); gsi = gsi_last_nondebug_bb (entry_bb); set_immediate_dominator (CDI_DOMINATORS, entry_bb, get_immediate_dominator (CDI_DOMINATORS, zero_iter1_bb)); } if (zero_iter2_bb) { /* Some counts[i] vars might be uninitialized if some loop has zero iterations. But the body shouldn't be executed in that case, so just avoid uninit warnings. 
*/ for (i = first_zero_iter2; i < fd->ordered; i++) if (SSA_VAR_P (counts[i])) TREE_NO_WARNING (counts[i]) = 1; if (zero_iter1_bb) make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU); else { gsi_prev (&gsi); e = split_block (entry_bb, gsi_stmt (gsi)); entry_bb = e->dest; make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU); gsi = gsi_last_nondebug_bb (entry_bb); set_immediate_dominator (CDI_DOMINATORS, entry_bb, get_immediate_dominator (CDI_DOMINATORS, zero_iter2_bb)); } } if (fd->collapse == 1) { counts[0] = fd->loop.n2; fd->loop = fd->loops[0]; } } type = TREE_TYPE (fd->loop.v); istart0 = create_tmp_var (fd->iter_type, ".istart0"); iend0 = create_tmp_var (fd->iter_type, ".iend0"); TREE_ADDRESSABLE (istart0) = 1; TREE_ADDRESSABLE (iend0) = 1; /* See if we need to bias by LLONG_MIN. */ if (fd->iter_type == long_long_unsigned_type_node && TREE_CODE (type) == INTEGER_TYPE && !TYPE_UNSIGNED (type) && fd->ordered == 0) { tree n1, n2; if (fd->loop.cond_code == LT_EXPR) { n1 = fd->loop.n1; n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step); } else { n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step); n2 = fd->loop.n1; } if (TREE_CODE (n1) != INTEGER_CST || TREE_CODE (n2) != INTEGER_CST || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0))) bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type)); } gimple_stmt_iterator gsif = gsi; gsi_prev (&gsif); tree arr = NULL_TREE; if (in_combined_parallel) { gcc_assert (fd->ordered == 0); /* In a combined parallel loop, emit a call to GOMP_loop_foo_next. */ t = build_call_expr (builtin_decl_explicit (next_fn), 2, build_fold_addr_expr (istart0), build_fold_addr_expr (iend0)); } else { tree t0, t1, t2, t3, t4; /* If this is not a combined parallel loop, emit a call to GOMP_loop_foo_start in ENTRY_BB. 
*/ t4 = build_fold_addr_expr (iend0); t3 = build_fold_addr_expr (istart0); if (fd->ordered) { t0 = build_int_cst (unsigned_type_node, fd->ordered - fd->collapse + 1); arr = create_tmp_var (build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1), ".omp_counts"); DECL_NAMELESS (arr) = 1; TREE_ADDRESSABLE (arr) = 1; TREE_STATIC (arr) = 1; vec<constructor_elt, va_gc> *v; vec_alloc (v, fd->ordered - fd->collapse + 1); int idx; for (idx = 0; idx < fd->ordered - fd->collapse + 1; idx++) { tree c; if (idx == 0 && fd->collapse > 1) c = fd->loop.n2; else c = counts[idx + fd->collapse - 1]; tree purpose = size_int (idx); CONSTRUCTOR_APPEND_ELT (v, purpose, c); if (TREE_CODE (c) != INTEGER_CST) TREE_STATIC (arr) = 0; } DECL_INITIAL (arr) = build_constructor (TREE_TYPE (arr), v); if (!TREE_STATIC (arr)) force_gimple_operand_gsi (&gsi, build1 (DECL_EXPR, void_type_node, arr), true, NULL_TREE, true, GSI_SAME_STMT); t1 = build_fold_addr_expr (arr); t2 = NULL_TREE; } else { t2 = fold_convert (fd->iter_type, fd->loop.step); t1 = fd->loop.n2; t0 = fd->loop.n1; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); t0 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); t1 = OMP_CLAUSE_DECL (innerc); } if (POINTER_TYPE_P (TREE_TYPE (t0)) && TYPE_PRECISION (TREE_TYPE (t0)) != TYPE_PRECISION (fd->iter_type)) { /* Avoid casting pointers to integer of a different size. 
*/ tree itype = signed_type_for (type); t1 = fold_convert (fd->iter_type, fold_convert (itype, t1)); t0 = fold_convert (fd->iter_type, fold_convert (itype, t0)); } else { t1 = fold_convert (fd->iter_type, t1); t0 = fold_convert (fd->iter_type, t0); } if (bias) { t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias); t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias); } } if (fd->iter_type == long_integer_type_node || fd->ordered) { if (fd->chunk_size) { t = fold_convert (fd->iter_type, fd->chunk_size); t = omp_adjust_chunk_size (t, fd->simd_schedule); if (sched_arg) { if (fd->ordered) t = build_call_expr (builtin_decl_explicit (start_fn), 8, t0, t1, sched_arg, t, t3, t4, reductions, mem); else t = build_call_expr (builtin_decl_explicit (start_fn), 9, t0, t1, t2, sched_arg, t, t3, t4, reductions, mem); } else if (fd->ordered) t = build_call_expr (builtin_decl_explicit (start_fn), 5, t0, t1, t, t3, t4); else t = build_call_expr (builtin_decl_explicit (start_fn), 6, t0, t1, t2, t, t3, t4); } else if (fd->ordered) t = build_call_expr (builtin_decl_explicit (start_fn), 4, t0, t1, t3, t4); else t = build_call_expr (builtin_decl_explicit (start_fn), 5, t0, t1, t2, t3, t4); } else { tree t5; tree c_bool_type; tree bfn_decl; /* The GOMP_loop_ull_*start functions have additional boolean argument, true for < loops and false for > loops. In Fortran, the C bool type can be different from boolean_type_node. */ bfn_decl = builtin_decl_explicit (start_fn); c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl)); t5 = build_int_cst (c_bool_type, fd->loop.cond_code == LT_EXPR ? 
1 : 0); if (fd->chunk_size) { tree bfn_decl = builtin_decl_explicit (start_fn); t = fold_convert (fd->iter_type, fd->chunk_size); t = omp_adjust_chunk_size (t, fd->simd_schedule); if (sched_arg) t = build_call_expr (bfn_decl, 10, t5, t0, t1, t2, sched_arg, t, t3, t4, reductions, mem); else t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4); } else t = build_call_expr (builtin_decl_explicit (start_fn), 6, t5, t0, t1, t2, t3, t4); } } if (TREE_TYPE (t) != boolean_type_node) t = fold_build2 (NE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); if (arr && !TREE_STATIC (arr)) { tree clobber = build_clobber (TREE_TYPE (arr)); gsi_insert_before (&gsi, gimple_build_assign (arr, clobber), GSI_SAME_STMT); } if (fd->have_pointer_condtemp) expand_omp_build_assign (&gsi, condtemp, memv, false); if (fd->have_reductemp) { gimple *g = gsi_stmt (gsi); gsi_remove (&gsi, true); release_ssa_name (gimple_assign_lhs (g)); entry_bb = region->entry; gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); } gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR statement. */ gsi_remove (&gsi, true); if (gsi_end_p (gsif)) gsif = gsi_after_labels (gsi_bb (gsif)); gsi_next (&gsif); /* Iteration setup for sequential loop goes in L0_BB. 
*/ tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR && gimple_omp_for_kind (inner_stmt) == GF_OMP_FOR_KIND_SIMD); tree innerc = omp_find_clause (gimple_omp_for_clauses (inner_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); } gsi = gsi_start_bb (l0_bb); t = istart0; if (fd->ordered && fd->collapse == 1) t = fold_build2 (MULT_EXPR, fd->iter_type, t, fold_convert (fd->iter_type, fd->loop.step)); else if (bias) t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias); if (fd->ordered && fd->collapse == 1) { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, fold_convert (sizetype, t)); else { t = fold_convert (TREE_TYPE (startvar), t); t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, t); } } else { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t); t = fold_convert (TREE_TYPE (startvar), t); } t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (cond_var) { tree itype = TREE_TYPE (cond_var); /* For lastprivate(conditional:) itervar, we need some iteration counter that starts at unsigned non-zero and increases. Prefer as few IVs as possible, so if we can use startvar itself, use that, or startvar + constant (those would be incremented with step), and as last resort use the s0 + 1 incremented by 1. 
*/ if ((fd->ordered && fd->collapse == 1) || bias || POINTER_TYPE_P (type) || TREE_CODE (fd->loop.n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, istart0), build_int_cst (itype, 1)); else if (tree_int_cst_sgn (fd->loop.n1) == 1) t = fold_convert (itype, t); else { tree c = fold_convert (itype, fd->loop.n1); c = fold_build2 (MINUS_EXPR, itype, build_int_cst (itype, 1), c); t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, t), c); } t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (cond_var, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } t = iend0; if (fd->ordered && fd->collapse == 1) t = fold_build2 (MULT_EXPR, fd->iter_type, t, fold_convert (fd->iter_type, fd->loop.step)); else if (bias) t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias); if (fd->ordered && fd->collapse == 1) { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, fold_convert (sizetype, t)); else { t = fold_convert (TREE_TYPE (startvar), t); t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, t); } } else { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t); t = fold_convert (TREE_TYPE (startvar), t); } iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, iend); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend))) assign_stmt = gimple_build_assign (fd->loop.v, iend); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Handle linear clause adjustments. 
*/ tree itercnt = NULL_TREE; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR) for (tree c = gimple_omp_for_clauses (fd->for_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { tree d = OMP_CLAUSE_DECL (c); bool is_ref = omp_is_reference (d); tree t = d, a, dest; if (is_ref) t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t); tree type = TREE_TYPE (t); if (POINTER_TYPE_P (type)) type = sizetype; dest = unshare_expr (t); tree v = create_tmp_var (TREE_TYPE (t), NULL); expand_omp_build_assign (&gsif, v, t); if (itercnt == NULL_TREE) { itercnt = startvar; tree n1 = fd->loop.n1; if (POINTER_TYPE_P (TREE_TYPE (itercnt))) { itercnt = fold_convert (signed_type_for (TREE_TYPE (itercnt)), itercnt); n1 = fold_convert (TREE_TYPE (itercnt), n1); } itercnt = fold_build2 (MINUS_EXPR, TREE_TYPE (itercnt), itercnt, n1); itercnt = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (itercnt), itercnt, fd->loop.step); itercnt = force_gimple_operand_gsi (&gsi, itercnt, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } a = fold_build2 (MULT_EXPR, type, fold_convert (type, itercnt), fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c))); t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (dest, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, NULL, inner_stmt, startvar); if (fd->ordered) { /* Until now, counts array contained number of iterations or variable containing it for ith loop. From now on, we need those counts only for collapsed loops, and only for the 2nd till the last collapsed one. 
Move those one element earlier, we'll use counts[fd->collapse - 1] for the first source/sink iteration counter and so on and counts[fd->ordered] as the array holding the current counter values for depend(source). */ if (fd->collapse > 1) memmove (counts, counts + 1, (fd->collapse - 1) * sizeof (counts[0])); if (broken_loop) { int i; for (i = fd->collapse; i < fd->ordered; i++) { tree type = TREE_TYPE (fd->loops[i].v); tree this_cond = fold_build2 (fd->loops[i].cond_code, boolean_type_node, fold_convert (type, fd->loops[i].n1), fold_convert (type, fd->loops[i].n2)); if (!integer_onep (this_cond)) break; } if (i < fd->ordered) { cont_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb); add_bb_to_loop (cont_bb, l1_bb->loop_father); gimple_stmt_iterator gsi = gsi_after_labels (cont_bb); gimple *g = gimple_build_omp_continue (fd->loop.v, fd->loop.v); gsi_insert_before (&gsi, g, GSI_SAME_STMT); make_edge (cont_bb, l3_bb, EDGE_FALLTHRU); make_edge (cont_bb, l1_bb, 0); l2_bb = create_empty_bb (cont_bb); broken_loop = false; } } expand_omp_ordered_source_sink (region, fd, counts, cont_bb); cont_bb = expand_omp_for_ordered_loops (fd, counts, cont_bb, l1_bb, ordered_lastprivate); if (counts[fd->collapse - 1]) { gcc_assert (fd->collapse == 1); gsi = gsi_last_bb (l0_bb); expand_omp_build_assign (&gsi, counts[fd->collapse - 1], istart0, true); gsi = gsi_last_bb (cont_bb); t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[fd->collapse - 1], build_int_cst (fd->iter_type, 1)); expand_omp_build_assign (&gsi, counts[fd->collapse - 1], t); tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_zero_node, NULL_TREE, NULL_TREE); expand_omp_build_assign (&gsi, aref, counts[fd->collapse - 1]); t = counts[fd->collapse - 1]; } else if (fd->collapse > 1) t = fd->loop.v; else { t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v), fd->loops[0].v, fd->loops[0].n1); t = fold_convert (fd->iter_type, t); } gsi = gsi_last_bb (l0_bb); tree aref = build4 (ARRAY_REF, 
fd->iter_type, counts[fd->ordered], size_zero_node, NULL_TREE, NULL_TREE); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); expand_omp_build_assign (&gsi, aref, t, true); } if (!broken_loop) { /* Code to control the increment and predicate for the sequential loop goes in the CONT_BB. */ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (cond_var) { tree itype = TREE_TYPE (cond_var); tree t2; if ((fd->ordered && fd->collapse == 1) || bias || POINTER_TYPE_P (type) || TREE_CODE (fd->loop.n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t2 = build_int_cst (itype, 1); else t2 = fold_convert (itype, fd->loop.step); t2 = fold_build2 (PLUS_EXPR, itype, cond_var, t2); t2 = force_gimple_operand_gsi (&gsi, t2, false, NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (cond_var, t2); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); } if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, fd->loop.step); else t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (vback) && TREE_ADDRESSABLE (vback), NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); if (fd->ordered && counts[fd->collapse - 1] == NULL_TREE) { tree tem; if (fd->collapse > 1) tem = fd->loop.v; else { tem = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v), fd->loops[0].v, fd->loops[0].n1); tem = fold_convert (fd->iter_type, tem); } tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_zero_node, NULL_TREE, NULL_TREE); tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE, true, GSI_SAME_STMT); expand_omp_build_assign (&gsi, 
aref, tem); } t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, iend); gcond *cond_stmt = gimple_build_cond_empty (t); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); } /* Remove GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, NULL, cont_bb, l1_bb); /* Emit code to get the next parallel iteration in L2_BB. */ gsi = gsi_start_bb (l2_bb); t = build_call_expr (builtin_decl_explicit (next_fn), 2, build_fold_addr_expr (istart0), build_fold_addr_expr (iend0)); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (TREE_TYPE (t) != boolean_type_node) t = fold_build2 (NE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); gcond *cond_stmt = gimple_build_cond_empty (t); gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING); } /* Add the loop cleanup function. */ gsi = gsi_last_nondebug_bb (exit_bb); if (gimple_omp_return_nowait_p (gsi_stmt (gsi))) t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT); else if (gimple_omp_return_lhs (gsi_stmt (gsi))) t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL); else t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END); gcall *call_stmt = gimple_build_call (t, 0); if (fd->ordered) { tree arr = counts[fd->ordered]; tree clobber = build_clobber (TREE_TYPE (arr)); gsi_insert_after (&gsi, gimple_build_assign (arr, clobber), GSI_SAME_STMT); } if (gimple_omp_return_lhs (gsi_stmt (gsi))) { gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi))); if (fd->have_reductemp) { gimple *g = gimple_build_assign (reductions, NOP_EXPR, gimple_call_lhs (call_stmt)); gsi_insert_after (&gsi, g, GSI_SAME_STMT); } } gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT); gsi_remove (&gsi, true); /* Connect the new blocks. 
*/ find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE; find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE; if (!broken_loop) { gimple_seq phis; e = find_edge (cont_bb, l3_bb); ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE); phis = phi_nodes (l3_bb); for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *phi = gsi_stmt (gsi); SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne), PHI_ARG_DEF_FROM_EDGE (phi, e)); } remove_edge (e); make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE); e = find_edge (cont_bb, l1_bb); if (e == NULL) { e = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (e->dest) == l1_bb); } if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (e); e = NULL; } else if (fd->collapse > 1) { remove_edge (e); e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else e->flags = EDGE_TRUE_VALUE; if (e) { e->probability = profile_probability::guessed_always ().apply_scale (7, 8); find_edge (cont_bb, l2_bb)->probability = e->probability.invert (); } else { e = find_edge (cont_bb, l2_bb); e->flags = EDGE_FALLTHRU; } make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE); if (gimple_in_ssa_p (cfun)) { /* Add phis to the outer loop that connect to the phis in the inner, original loop, and move the loop entry value of the inner phi to the loop entry value of the outer phi. 
*/ gphi_iterator psi; for (psi = gsi_start_phis (l3_bb); !gsi_end_p (psi); gsi_next (&psi)) { location_t locus; gphi *nphi; gphi *exit_phi = psi.phi (); if (virtual_operand_p (gimple_phi_result (exit_phi))) continue; edge l2_to_l3 = find_edge (l2_bb, l3_bb); tree exit_res = PHI_ARG_DEF_FROM_EDGE (exit_phi, l2_to_l3); basic_block latch = BRANCH_EDGE (cont_bb)->dest; edge latch_to_l1 = find_edge (latch, l1_bb); gphi *inner_phi = find_phi_with_arg_on_edge (exit_res, latch_to_l1); tree t = gimple_phi_result (exit_phi); tree new_res = copy_ssa_name (t, NULL); nphi = create_phi_node (new_res, l0_bb); edge l0_to_l1 = find_edge (l0_bb, l1_bb); t = PHI_ARG_DEF_FROM_EDGE (inner_phi, l0_to_l1); locus = gimple_phi_arg_location_from_edge (inner_phi, l0_to_l1); edge entry_to_l0 = find_edge (entry_bb, l0_bb); add_phi_arg (nphi, t, entry_to_l0, locus); edge l2_to_l0 = find_edge (l2_bb, l0_bb); add_phi_arg (nphi, exit_res, l2_to_l0, UNKNOWN_LOCATION); add_phi_arg (inner_phi, new_res, l0_to_l1, UNKNOWN_LOCATION); } } set_immediate_dominator (CDI_DOMINATORS, l2_bb, recompute_dominator (CDI_DOMINATORS, l2_bb)); set_immediate_dominator (CDI_DOMINATORS, l3_bb, recompute_dominator (CDI_DOMINATORS, l3_bb)); set_immediate_dominator (CDI_DOMINATORS, l0_bb, recompute_dominator (CDI_DOMINATORS, l0_bb)); set_immediate_dominator (CDI_DOMINATORS, l1_bb, recompute_dominator (CDI_DOMINATORS, l1_bb)); /* We enter expand_omp_for_generic with a loop. This original loop may have its own loop struct, or it may be part of an outer loop struct (which may be the fake loop). */ class loop *outer_loop = entry_bb->loop_father; bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop; add_bb_to_loop (l2_bb, outer_loop); /* We've added a new loop around the original loop. Allocate the corresponding loop struct. 
*/ class loop *new_loop = alloc_loop (); new_loop->header = l0_bb; new_loop->latch = l2_bb; add_loop (new_loop, outer_loop); /* Allocate a loop structure for the original loop unless we already had one. */ if (!orig_loop_has_loop_struct && !gimple_omp_for_combined_p (fd->for_stmt)) { class loop *orig_loop = alloc_loop (); orig_loop->header = l1_bb; /* The loop may have multiple latches. */ add_loop (orig_loop, new_loop); } } } /* Helper function for expand_omp_for_static_nochunk. If PTR is NULL, compute needed allocation size. If !ALLOC of team allocations, if ALLOC of thread allocation. SZ is the initial needed size for other purposes, ALLOC_ALIGN guaranteed alignment of allocation in bytes, CNT number of elements of each array, for !ALLOC this is omp_get_num_threads (), for ALLOC number of iterations handled by the current thread. If PTR is non-NULL, it is the start of the allocation and this routine shall assign to OMP_CLAUSE_DECL (c) of those _scantemp_ clauses pointers to the corresponding arrays. 
*/

static tree
expand_omp_scantemp_alloc (tree clauses, tree ptr, unsigned HOST_WIDE_INT sz,
			   unsigned HOST_WIDE_INT alloc_align, tree cnt,
			   gimple_stmt_iterator *gsi, bool alloc)
{
  /* Running symbolic size of all arrays (only used when PTR is NULL).  */
  tree eltsz = NULL_TREE;
  /* Alignment the current position is known to have; 0 before the first
     array, 1 when the previous element size was not a compile-time
     constant.  */
  unsigned HOST_WIDE_INT preval = 0;
  if (ptr && sz)
    /* Skip over the SZ bytes reserved for other purposes at the start
       of the allocation.  */
    ptr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (ptr),
		       ptr, size_int (sz));
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_
	&& !OMP_CLAUSE__SCANTEMP__CONTROL (c)
	&& (!OMP_CLAUSE__SCANTEMP__ALLOC (c)) != alloc)
      {
	/* The clause decl is a pointer; the array elements have the
	   pointed-to type.  */
	tree pointee_type = TREE_TYPE (TREE_TYPE (OMP_CLAUSE_DECL (c)));
	unsigned HOST_WIDE_INT al = TYPE_ALIGN_UNIT (pointee_type);
	if (tree_fits_uhwi_p (TYPE_SIZE_UNIT (pointee_type)))
	  {
	    /* CNT need not be a multiple of the alignment, so the next
	       array can only rely on the lowest set bit of the element
	       size; don't assume more alignment than that provides.  */
	    unsigned HOST_WIDE_INT szl
	      = tree_to_uhwi (TYPE_SIZE_UNIT (pointee_type));
	    szl = least_bit_hwi (szl);
	    if (szl)
	      al = MIN (al, szl);
	  }
	if (ptr == NULL_TREE)
	  {
	    /* Size-computation mode: accumulate per-element sizes.  */
	    if (eltsz == NULL_TREE)
	      eltsz = TYPE_SIZE_UNIT (pointee_type);
	    else
	      eltsz = size_binop (PLUS_EXPR, eltsz,
				  TYPE_SIZE_UNIT (pointee_type));
	  }
	if (preval == 0 && al <= alloc_align)
	  {
	    /* First array (or all previous ones constant-sized): the
	       allocation itself guarantees AL, just round SZ up.  */
	    unsigned HOST_WIDE_INT diff = ROUND_UP (sz, al) - sz;
	    sz += diff;
	    if (diff && ptr)
	      ptr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (ptr),
				 ptr, size_int (diff));
	  }
	else if (al > preval)
	  {
	    if (ptr)
	      {
		/* Align the running pointer up to AL at runtime:
		   (ptr + al - 1) & -al, done in an integer type.  */
		ptr = fold_convert (pointer_sized_int_node, ptr);
		ptr = fold_build2 (PLUS_EXPR, pointer_sized_int_node, ptr,
				   build_int_cst (pointer_sized_int_node,
						  al - 1));
		ptr = fold_build2 (BIT_AND_EXPR, pointer_sized_int_node, ptr,
				   build_int_cst (pointer_sized_int_node,
						  -(HOST_WIDE_INT) al));
		ptr = fold_convert (ptr_type_node, ptr);
	      }
	    else
	      /* Size mode: reserve worst-case padding.  */
	      sz += al - 1;
	  }
	if (tree_fits_uhwi_p (TYPE_SIZE_UNIT (pointee_type)))
	  preval = al;
	else
	  /* Variable element size: next array can't rely on alignment.  */
	  preval = 1;
	if (ptr)
	  {
	    /* Hand this array's start to the clause decl and advance
	       past its CNT elements.  */
	    expand_omp_build_assign (gsi, OMP_CLAUSE_DECL (c), ptr, false);
	    ptr = OMP_CLAUSE_DECL (c);
	    ptr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (ptr), ptr,
			       size_binop (MULT_EXPR, cnt,
					   TYPE_SIZE_UNIT (pointee_type)));
	  }
      }
  if (ptr == NULL_TREE)
    {
      /* Total size = per-element sum times CNT plus the extra SZ bytes
	 (which now include any alignment padding).  */
      eltsz = size_binop (MULT_EXPR, eltsz, cnt);
      if (sz)
	eltsz = size_binop (PLUS_EXPR, eltsz, size_int (sz));
      return eltsz;
    }
  else
    return ptr;
}

/* Return the last _looptemp_ clause if one has been created for
   lastprivate on distribute parallel for{, simd} or taskloop.
   FD is the loop data and INNERC should be the second _looptemp_
   clause (the one holding the end of the range).
   This is followed by collapse - 1 _looptemp_ clauses for the counts[1]
   and up, and for triangular loops followed by 4 further _looptemp_
   clauses (one for counts[0], one first_inner_iterations, one factor
   and one adjn1).  After this there is optionally one _looptemp_
   clause that this function returns.  */

static tree
find_lastprivate_looptemp (struct omp_for_data *fd, tree innerc)
{
  gcc_assert (innerc);
  /* Number of intermediate _looptemp_ clauses to skip over, as
     described in the function comment above.  */
  int count = fd->collapse - 1;
  if (fd->non_rect
      && fd->last_nonrect == fd->first_nonrect + 1
      && !TYPE_UNSIGNED (TREE_TYPE (fd->loops[fd->last_nonrect].v)))
    count += 4;
  for (int i = 0; i < count; i++)
    {
      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
    }
  /* The optional lastprivate _looptemp_ (may be NULL).  */
  return omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
			  OMP_CLAUSE__LOOPTEMP_);
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.
Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2; if (cond is <) adj = STEP - 1; else adj = STEP + 1; if ((__typeof (V)) -1 > 0 && cond is >) n = -(adj + N2 - N1) / -STEP; else n = (adj + N2 - N1) / STEP; q = n / nthreads; tt = n % nthreads; if (threadid < tt) goto L3; else goto L4; L3: tt = 0; q = q + 1; L4: s0 = q * threadid + tt; e0 = s0 + q; V = s0 * STEP + N1; if (s0 >= e0) goto L2; else goto L0; L0: e = e0 * STEP + N1; L1: BODY; V += STEP; if (V cond e) goto L1; L2: */ static void expand_omp_for_static_nochunk (struct omp_region *region, struct omp_for_data *fd, gimple *inner_stmt) { tree n, q, s0, e0, e, t, tt, nthreads = NULL_TREE, threadid; tree type, itype, vmain, vback; basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb; basic_block body_bb, cont_bb, collapse_bb = NULL; basic_block fin_bb, fourth_bb = NULL, fifth_bb = NULL, sixth_bb = NULL; basic_block exit1_bb = NULL, exit2_bb = NULL, exit3_bb = NULL; gimple_stmt_iterator gsi, gsip; edge ep; bool broken_loop = region->cont == NULL; tree *counts = NULL; tree n1, n2, step; tree reductions = NULL_TREE; tree cond_var = NULL_TREE, condtemp = NULL_TREE; itype = type = TREE_TYPE (fd->loop.v); if (POINTER_TYPE_P (type)) itype = signed_type_for (type); entry_bb = region->entry; cont_bb = region->cont; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); fin_bb = BRANCH_EDGE (entry_bb)->dest; gcc_assert (broken_loop || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest)); seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb)); body_bb = single_succ (seq_start_bb); if (!broken_loop) { gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); } exit_bb = region->exit; /* Iteration space partitioning goes in ENTRY_BB. 
*/ gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); gsip = gsi; gsi_prev (&gsip); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block l2_dom_bb = NULL, dummy_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, fin_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); t = NULL_TREE; } else if (gimple_omp_for_combined_into_p (fd->for_stmt)) t = integer_one_node; else t = fold_binary (fd->loop.cond_code, boolean_type_node, fold_convert (type, fd->loop.n1), fold_convert (type, fd->loop.n2)); if (fd->collapse == 1 && TYPE_UNSIGNED (type) && (t == NULL_TREE || !integer_onep (t))) { n1 = fold_convert (type, unshare_expr (fd->loop.n1)); n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE, true, GSI_SAME_STMT); n2 = fold_convert (type, unshare_expr (fd->loop.n2)); n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE, true, GSI_SAME_STMT); gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, &gsi); } ep = split_block (entry_bb, cond_stmt); ep->flags = EDGE_TRUE_VALUE; entry_bb = ep->dest; ep->probability = profile_probability::very_likely (); ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE); ep->probability = profile_probability::very_unlikely (); if (gimple_in_ssa_p (cfun)) { int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx; for (gphi_iterator gpi = gsi_start_phis (fin_bb); !gsi_end_p (gpi); gsi_next (&gpi)) { gphi *phi = gpi.phi (); add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx), ep, UNKNOWN_LOCATION); } } gsi = gsi_last_bb (entry_bb); } if (fd->lastprivate_conditional) { tree clauses = 
gimple_omp_for_clauses (fd->for_stmt); tree c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_); if (fd->have_pointer_condtemp) condtemp = OMP_CLAUSE_DECL (c); c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE__CONDTEMP_); cond_var = OMP_CLAUSE_DECL (c); } if (fd->have_reductemp /* For scan, we don't want to reinitialize condtemp before the second loop. */ || (fd->have_pointer_condtemp && !fd->have_scantemp) || fd->have_nonctrl_scantemp) { tree t1 = build_int_cst (long_integer_type_node, 0); tree t2 = build_int_cst (long_integer_type_node, 1); tree t3 = build_int_cstu (long_integer_type_node, (HOST_WIDE_INT_1U << 31) + 1); tree clauses = gimple_omp_for_clauses (fd->for_stmt); gimple_stmt_iterator gsi2 = gsi_none (); gimple *g = NULL; tree mem = null_pointer_node, memv = NULL_TREE; unsigned HOST_WIDE_INT condtemp_sz = 0; unsigned HOST_WIDE_INT alloc_align = 0; if (fd->have_reductemp) { gcc_assert (!fd->have_nonctrl_scantemp); tree c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_); reductions = OMP_CLAUSE_DECL (c); gcc_assert (TREE_CODE (reductions) == SSA_NAME); g = SSA_NAME_DEF_STMT (reductions); reductions = gimple_assign_rhs1 (g); OMP_CLAUSE_DECL (c) = reductions; gsi2 = gsi_for_stmt (g); } else { if (gsi_end_p (gsip)) gsi2 = gsi_after_labels (region->entry); else gsi2 = gsip; reductions = null_pointer_node; } if (fd->have_pointer_condtemp || fd->have_nonctrl_scantemp) { tree type; if (fd->have_pointer_condtemp) type = TREE_TYPE (condtemp); else type = ptr_type_node; memv = create_tmp_var (type); TREE_ADDRESSABLE (memv) = 1; unsigned HOST_WIDE_INT sz = 0; tree size = NULL_TREE; if (fd->have_pointer_condtemp) { sz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))); sz *= fd->lastprivate_conditional; condtemp_sz = sz; } if (fd->have_nonctrl_scantemp) { nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); gimple *g = gimple_build_call (nthreads, 0); nthreads = create_tmp_var (integer_type_node); gimple_call_set_lhs (g, nthreads); 
gsi_insert_before (&gsi2, g, GSI_SAME_STMT); nthreads = fold_convert (sizetype, nthreads); alloc_align = TYPE_ALIGN_UNIT (long_long_integer_type_node); size = expand_omp_scantemp_alloc (clauses, NULL_TREE, sz, alloc_align, nthreads, NULL, false); size = fold_convert (type, size); } else size = build_int_cst (type, sz); expand_omp_build_assign (&gsi2, memv, size, false); mem = build_fold_addr_expr (memv); } tree t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_LOOP_START), 9, t1, t2, t2, t3, t1, null_pointer_node, null_pointer_node, reductions, mem); force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE, true, GSI_SAME_STMT); if (fd->have_pointer_condtemp) expand_omp_build_assign (&gsi2, condtemp, memv, false); if (fd->have_nonctrl_scantemp) { tree ptr = fd->have_pointer_condtemp ? condtemp : memv; expand_omp_scantemp_alloc (clauses, ptr, condtemp_sz, alloc_align, nthreads, &gsi2, false); } if (fd->have_reductemp) { gsi_remove (&gsi2, true); release_ssa_name (gimple_assign_lhs (g)); } } switch (gimple_omp_for_kind (fd->for_stmt)) { case GF_OMP_FOR_KIND_FOR: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); break; case GF_OMP_FOR_KIND_DISTRIBUTE: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM); break; default: gcc_unreachable (); } nthreads = build_call_expr (nthreads, 0); nthreads = fold_convert (itype, nthreads); nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE, true, GSI_SAME_STMT); threadid = build_call_expr (threadid, 0); threadid = fold_convert (itype, threadid); threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE, true, GSI_SAME_STMT); n1 = fd->loop.n1; n2 = fd->loop.n2; step = fd->loop.step; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = 
OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, itype, step, t); t = fold_build2 (PLUS_EXPR, itype, t, n2); t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1)); if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, step)); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step); t = fold_convert (itype, t); n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); q = create_tmp_reg (itype, "q"); t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT); gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT); tt = create_tmp_reg (itype, "tt"); t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT); gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT); t = build2 (LT_EXPR, boolean_type_node, threadid, tt); gcond *cond_stmt = gimple_build_cond_empty (t); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); second_bb = split_block (entry_bb, cond_stmt)->dest; gsi = gsi_last_nondebug_bb (second_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)), GSI_SAME_STMT); gassign *assign_stmt = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1)); 
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); third_bb = split_block (second_bb, assign_stmt)->dest; gsi = gsi_last_nondebug_bb (third_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->have_nonctrl_scantemp) { tree clauses = gimple_omp_for_clauses (fd->for_stmt); tree controlp = NULL_TREE, controlb = NULL_TREE; for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_ && OMP_CLAUSE__SCANTEMP__CONTROL (c)) { if (TREE_TYPE (OMP_CLAUSE_DECL (c)) == boolean_type_node) controlb = OMP_CLAUSE_DECL (c); else controlp = OMP_CLAUSE_DECL (c); if (controlb && controlp) break; } gcc_assert (controlp && controlb); tree cnt = create_tmp_var (sizetype); gimple *g = gimple_build_assign (cnt, NOP_EXPR, q); gsi_insert_before (&gsi, g, GSI_SAME_STMT); unsigned HOST_WIDE_INT alloc_align = TYPE_ALIGN_UNIT (ptr_type_node); tree sz = expand_omp_scantemp_alloc (clauses, NULL_TREE, 0, alloc_align, cnt, NULL, true); tree size = create_tmp_var (sizetype); expand_omp_build_assign (&gsi, size, sz, false); tree cmp = fold_build2 (GT_EXPR, boolean_type_node, size, size_int (16384)); expand_omp_build_assign (&gsi, controlb, cmp); g = gimple_build_cond (NE_EXPR, controlb, boolean_false_node, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, g, GSI_SAME_STMT); fourth_bb = split_block (third_bb, g)->dest; gsi = gsi_last_nondebug_bb (fourth_bb); /* FIXME: Once we have allocators, this should use allocator. 
*/ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_MALLOC), 1, size); gimple_call_set_lhs (g, controlp); gsi_insert_before (&gsi, g, GSI_SAME_STMT); expand_omp_scantemp_alloc (clauses, controlp, 0, alloc_align, cnt, &gsi, true); gsi_prev (&gsi); g = gsi_stmt (gsi); fifth_bb = split_block (fourth_bb, g)->dest; gsi = gsi_last_nondebug_bb (fifth_bb); g = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_SAVE), 0); gimple_call_set_lhs (g, controlp); gsi_insert_before (&gsi, g, GSI_SAME_STMT); tree alloca_decl = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN); for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_ && OMP_CLAUSE__SCANTEMP__ALLOC (c)) { tree tmp = create_tmp_var (sizetype); tree pointee_type = TREE_TYPE (TREE_TYPE (OMP_CLAUSE_DECL (c))); g = gimple_build_assign (tmp, MULT_EXPR, cnt, TYPE_SIZE_UNIT (pointee_type)); gsi_insert_before (&gsi, g, GSI_SAME_STMT); g = gimple_build_call (alloca_decl, 2, tmp, size_int (TYPE_ALIGN (pointee_type))); gimple_call_set_lhs (g, OMP_CLAUSE_DECL (c)); gsi_insert_before (&gsi, g, GSI_SAME_STMT); } sixth_bb = split_block (fifth_bb, g)->dest; gsi = gsi_last_nondebug_bb (sixth_bb); } t = build2 (MULT_EXPR, itype, q, threadid); t = build2 (PLUS_EXPR, itype, t, tt); s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (PLUS_EXPR, itype, s0, q); e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = build2 (GE_EXPR, boolean_type_node, s0, e0); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR statement. */ gsi_remove (&gsi, true); /* Setup code for sequential iteration goes in SEQ_START_BB. */ gsi = gsi_start_bb (seq_start_bb); tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL ? 
gimple_omp_parallel_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE) { innerc = find_lastprivate_looptemp (fd, innerc); if (innerc) { /* If needed (distribute parallel for with lastprivate), propagate down the total number of iterations. */ tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)), fd->loop.n2); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } } } t = fold_convert (itype, s0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) { t = fold_build_pointer_plus (n1, t); if (!POINTER_TYPE_P (TREE_TYPE (startvar)) && TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type)) t = fold_convert (signed_type_for (type), t); } else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (cond_var) { tree itype = TREE_TYPE (cond_var); /* For lastprivate(conditional:) itervar, we need some iteration counter that starts at unsigned non-zero and increases. Prefer as few IVs as possible, so if we can use startvar itself, use that, or startvar + constant (those would be incremented with step), and as last resort use the s0 + 1 incremented by 1. 
*/ if (POINTER_TYPE_P (type) || TREE_CODE (n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, s0), build_int_cst (itype, 1)); else if (tree_int_cst_sgn (n1) == 1) t = fold_convert (itype, t); else { tree c = fold_convert (itype, n1); c = fold_build2 (MINUS_EXPR, itype, build_int_cst (itype, 1), c); t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, t), c); } t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (cond_var, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } t = fold_convert (itype, e0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) { t = fold_build_pointer_plus (n1, t); if (!POINTER_TYPE_P (TREE_TYPE (startvar)) && TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type)) t = fold_convert (signed_type_for (type), t); } else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e))) assign_stmt = gimple_build_assign (fd->loop.v, e); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Handle linear clause adjustments. 
*/ tree itercnt = NULL_TREE; tree *nonrect_bounds = NULL; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR) for (tree c = gimple_omp_for_clauses (fd->for_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { tree d = OMP_CLAUSE_DECL (c); bool is_ref = omp_is_reference (d); tree t = d, a, dest; if (is_ref) t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t); if (itercnt == NULL_TREE) { if (gimple_omp_for_combined_into_p (fd->for_stmt)) { itercnt = fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1), fold_convert (itype, fd->loop.n1)); itercnt = fold_build2 (EXACT_DIV_EXPR, itype, itercnt, step); itercnt = fold_build2 (PLUS_EXPR, itype, itercnt, s0); itercnt = force_gimple_operand_gsi (&gsi, itercnt, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } else itercnt = s0; } tree type = TREE_TYPE (t); if (POINTER_TYPE_P (type)) type = sizetype; a = fold_build2 (MULT_EXPR, type, fold_convert (type, itercnt), fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c))); dest = unshare_expr (t); t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR : POINTER_PLUS_EXPR, TREE_TYPE (t), t, a); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (dest, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) { if (fd->non_rect) { nonrect_bounds = XALLOCAVEC (tree, fd->last_nonrect + 1); memset (nonrect_bounds, 0, sizeof (tree) * (fd->last_nonrect + 1)); } expand_omp_for_init_vars (fd, &gsi, counts, nonrect_bounds, inner_stmt, startvar); } if (!broken_loop) { /* The code controlling the sequential loop replaces the GIMPLE_OMP_CONTINUE. 
*/ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (cond_var) { tree itype = TREE_TYPE (cond_var); tree t2; if (POINTER_TYPE_P (type) || TREE_CODE (n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t2 = build_int_cst (itype, 1); else t2 = fold_convert (itype, step); t2 = fold_build2 (PLUS_EXPR, itype, cond_var, t2); t2 = force_gimple_operand_gsi (&gsi, t2, false, NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (cond_var, t2); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); } if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, step); else t = fold_build2 (PLUS_EXPR, type, vmain, step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (vback) && TREE_ADDRESSABLE (vback), NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); } /* Remove the GIMPLE_OMP_CONTINUE statement. */ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, nonrect_bounds, cont_bb, body_bb); } /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. 
*/ gsi = gsi_last_nondebug_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (gsi))) { t = gimple_omp_return_lhs (gsi_stmt (gsi)); if (fd->have_reductemp || ((fd->have_pointer_condtemp || fd->have_scantemp) && !fd->have_nonctrl_scantemp)) { tree fn; if (t) fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL); else fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END); gcall *g = gimple_build_call (fn, 0); if (t) { gimple_call_set_lhs (g, t); if (fd->have_reductemp) gsi_insert_after (&gsi, gimple_build_assign (reductions, NOP_EXPR, t), GSI_SAME_STMT); } gsi_insert_after (&gsi, g, GSI_SAME_STMT); } else gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT); } else if ((fd->have_pointer_condtemp || fd->have_scantemp) && !fd->have_nonctrl_scantemp) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT); gcall *g = gimple_build_call (fn, 0); gsi_insert_after (&gsi, g, GSI_SAME_STMT); } if (fd->have_scantemp && !fd->have_nonctrl_scantemp) { tree clauses = gimple_omp_for_clauses (fd->for_stmt); tree controlp = NULL_TREE, controlb = NULL_TREE; for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_ && OMP_CLAUSE__SCANTEMP__CONTROL (c)) { if (TREE_TYPE (OMP_CLAUSE_DECL (c)) == boolean_type_node) controlb = OMP_CLAUSE_DECL (c); else controlp = OMP_CLAUSE_DECL (c); if (controlb && controlp) break; } gcc_assert (controlp && controlb); gimple *g = gimple_build_cond (NE_EXPR, controlb, boolean_false_node, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, g, GSI_SAME_STMT); exit1_bb = split_block (exit_bb, g)->dest; gsi = gsi_after_labels (exit1_bb); g = gimple_build_call (builtin_decl_explicit (BUILT_IN_FREE), 1, controlp); gsi_insert_before (&gsi, g, GSI_SAME_STMT); exit2_bb = split_block (exit1_bb, g)->dest; gsi = gsi_after_labels (exit2_bb); g = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_RESTORE), 1, controlp); gsi_insert_before (&gsi, g, GSI_SAME_STMT); exit3_bb = split_block (exit2_bb, 
g)->dest; gsi = gsi_after_labels (exit3_bb); } gsi_remove (&gsi, true); /* Connect all the blocks. */ ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE); ep->probability = profile_probability::guessed_always ().apply_scale (3, 4); ep = find_edge (entry_bb, second_bb); ep->flags = EDGE_TRUE_VALUE; ep->probability = profile_probability::guessed_always ().apply_scale (1, 4); if (fourth_bb) { ep = make_edge (third_bb, fifth_bb, EDGE_FALSE_VALUE); ep->probability = profile_probability::guessed_always ().apply_scale (1, 2); ep = find_edge (third_bb, fourth_bb); ep->flags = EDGE_TRUE_VALUE; ep->probability = profile_probability::guessed_always ().apply_scale (1, 2); ep = find_edge (fourth_bb, fifth_bb); redirect_edge_and_branch (ep, sixth_bb); } else sixth_bb = third_bb; find_edge (sixth_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE; find_edge (sixth_bb, fin_bb)->flags = EDGE_TRUE_VALUE; if (exit1_bb) { ep = make_edge (exit_bb, exit2_bb, EDGE_FALSE_VALUE); ep->probability = profile_probability::guessed_always ().apply_scale (1, 2); ep = find_edge (exit_bb, exit1_bb); ep->flags = EDGE_TRUE_VALUE; ep->probability = profile_probability::guessed_always ().apply_scale (1, 2); ep = find_edge (exit1_bb, exit2_bb); redirect_edge_and_branch (ep, exit3_bb); } if (!broken_loop) { ep = find_edge (cont_bb, body_bb); if (ep == NULL) { ep = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (ep->dest) == body_bb); } if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (ep); ep = NULL; } else if (fd->collapse > 1) { remove_edge (ep); ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else ep->flags = EDGE_TRUE_VALUE; find_edge (cont_bb, fin_bb)->flags = ep ? 
EDGE_FALSE_VALUE : EDGE_FALLTHRU;
    }

  /* The CFG has been rewired above (second_bb/third_bb were split off
     from entry_bb, the scantemp blocks from third_bb); bring the
     dominator tree back in sync.  */
  set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
  if (fourth_bb)
    {
      /* fourth_bb..sixth_bb only exist for fd->have_nonctrl_scantemp;
	 both arms of third_bb's condition are dominated by it.  */
      set_immediate_dominator (CDI_DOMINATORS, fifth_bb, third_bb);
      set_immediate_dominator (CDI_DOMINATORS, sixth_bb, third_bb);
    }
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, sixth_bb);
  /* body_bb and fin_bb can have multiple predecessors, so recompute
     their immediate dominators rather than asserting a single one.  */
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
  if (exit1_bb)
    {
      set_immediate_dominator (CDI_DOMINATORS, exit2_bb, exit_bb);
      set_immediate_dominator (CDI_DOMINATORS, exit3_bb, exit_bb);
    }

  class loop *loop = body_bb->loop_father;
  if (loop != entry_bb->loop_father)
    {
      /* body_bb already belongs to a deeper loop than entry_bb
	 (presumably created for an inner construct); just sanity-check
	 its shape and do not register a new loop.  */
      gcc_assert (broken_loop || loop->header == body_bb);
      gcc_assert (broken_loop
		  || loop->latch == region->cont
		  || single_pred (loop->latch) == region->cont);
      return;
    }

  if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
    {
      /* Register the sequential loop that was materialized above with
	 the loop tree.  When the construct is collapsed the latch is
	 not cont_bb, so leave it unset in that case.  */
      loop = alloc_loop ();
      loop->header = body_bb;
      if (collapse_bb == NULL)
	loop->latch = cont_bb;
      add_loop (loop, body_bb->loop_father);
    }
}

/* Return the phi node in E->DEST whose argument on edge E is ARG,
   or NULL if no such phi exists.  */

static gphi *
find_phi_with_arg_on_edge (tree arg, edge e)
{
  basic_block bb = e->dest;
  for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi);
       gsi_next (&gpi))
    {
      gphi *phi = gpi.phi ();
      if (PHI_ARG_DEF_FROM_EDGE (phi, e) == arg)
	return phi;
    }

  return NULL;
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.
Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2; if (cond is <) adj = STEP - 1; else adj = STEP + 1; if ((__typeof (V)) -1 > 0 && cond is >) n = -(adj + N2 - N1) / -STEP; else n = (adj + N2 - N1) / STEP; trip = 0; V = threadid * CHUNK * STEP + N1; -- this extra definition of V is here so that V is defined if the loop is not entered L0: s0 = (trip * nthreads + threadid) * CHUNK; e0 = min (s0 + CHUNK, n); if (s0 < n) goto L1; else goto L4; L1: V = s0 * STEP + N1; e = e0 * STEP + N1; L2: BODY; V += STEP; if (V cond e) goto L2; else goto L3; L3: trip += 1; goto L0; L4: */ static void expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd, gimple *inner_stmt) { tree n, s0, e0, e, t; tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid; tree type, itype, vmain, vback, vextra; basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb; basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb; gimple_stmt_iterator gsi, gsip; edge se; bool broken_loop = region->cont == NULL; tree *counts = NULL; tree n1, n2, step; tree reductions = NULL_TREE; tree cond_var = NULL_TREE, condtemp = NULL_TREE; itype = type = TREE_TYPE (fd->loop.v); if (POINTER_TYPE_P (type)) itype = signed_type_for (type); entry_bb = region->entry; se = split_block (entry_bb, last_stmt (entry_bb)); entry_bb = se->src; iter_part_bb = se->dest; cont_bb = region->cont; gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2); fin_bb = BRANCH_EDGE (iter_part_bb)->dest; gcc_assert (broken_loop || fin_bb == FALLTHRU_EDGE (cont_bb)->dest); seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb)); body_bb = single_succ (seq_start_bb); if (!broken_loop) { gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); trip_update_bb = split_edge (FALLTHRU_EDGE 
(cont_bb)); } exit_bb = region->exit; /* Trip and adjustment setup goes in ENTRY_BB. */ gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); gsip = gsi; gsi_prev (&gsip); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block l2_dom_bb = NULL, dummy_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, fin_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); t = NULL_TREE; } else if (gimple_omp_for_combined_into_p (fd->for_stmt)) t = integer_one_node; else t = fold_binary (fd->loop.cond_code, boolean_type_node, fold_convert (type, fd->loop.n1), fold_convert (type, fd->loop.n2)); if (fd->collapse == 1 && TYPE_UNSIGNED (type) && (t == NULL_TREE || !integer_onep (t))) { n1 = fold_convert (type, unshare_expr (fd->loop.n1)); n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE, true, GSI_SAME_STMT); n2 = fold_convert (type, unshare_expr (fd->loop.n2)); n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE, true, GSI_SAME_STMT); gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, &gsi); } se = split_block (entry_bb, cond_stmt); se->flags = EDGE_TRUE_VALUE; entry_bb = se->dest; se->probability = profile_probability::very_likely (); se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE); se->probability = profile_probability::very_unlikely (); if (gimple_in_ssa_p (cfun)) { int dest_idx = find_edge (iter_part_bb, fin_bb)->dest_idx; for (gphi_iterator gpi = gsi_start_phis (fin_bb); !gsi_end_p (gpi); gsi_next (&gpi)) { gphi *phi = gpi.phi (); add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx), se, UNKNOWN_LOCATION); } } gsi = 
gsi_last_bb (entry_bb); } if (fd->lastprivate_conditional) { tree clauses = gimple_omp_for_clauses (fd->for_stmt); tree c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_); if (fd->have_pointer_condtemp) condtemp = OMP_CLAUSE_DECL (c); c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE__CONDTEMP_); cond_var = OMP_CLAUSE_DECL (c); } if (fd->have_reductemp || fd->have_pointer_condtemp) { tree t1 = build_int_cst (long_integer_type_node, 0); tree t2 = build_int_cst (long_integer_type_node, 1); tree t3 = build_int_cstu (long_integer_type_node, (HOST_WIDE_INT_1U << 31) + 1); tree clauses = gimple_omp_for_clauses (fd->for_stmt); gimple_stmt_iterator gsi2 = gsi_none (); gimple *g = NULL; tree mem = null_pointer_node, memv = NULL_TREE; if (fd->have_reductemp) { tree c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_); reductions = OMP_CLAUSE_DECL (c); gcc_assert (TREE_CODE (reductions) == SSA_NAME); g = SSA_NAME_DEF_STMT (reductions); reductions = gimple_assign_rhs1 (g); OMP_CLAUSE_DECL (c) = reductions; gsi2 = gsi_for_stmt (g); } else { if (gsi_end_p (gsip)) gsi2 = gsi_after_labels (region->entry); else gsi2 = gsip; reductions = null_pointer_node; } if (fd->have_pointer_condtemp) { tree type = TREE_TYPE (condtemp); memv = create_tmp_var (type); TREE_ADDRESSABLE (memv) = 1; unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))); sz *= fd->lastprivate_conditional; expand_omp_build_assign (&gsi2, memv, build_int_cst (type, sz), false); mem = build_fold_addr_expr (memv); } tree t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_LOOP_START), 9, t1, t2, t2, t3, t1, null_pointer_node, null_pointer_node, reductions, mem); force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE, true, GSI_SAME_STMT); if (fd->have_pointer_condtemp) expand_omp_build_assign (&gsi2, condtemp, memv, false); if (fd->have_reductemp) { gsi_remove (&gsi2, true); release_ssa_name (gimple_assign_lhs (g)); } } switch (gimple_omp_for_kind (fd->for_stmt)) { case 
GF_OMP_FOR_KIND_FOR: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); break; case GF_OMP_FOR_KIND_DISTRIBUTE: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM); break; default: gcc_unreachable (); } nthreads = build_call_expr (nthreads, 0); nthreads = fold_convert (itype, nthreads); nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE, true, GSI_SAME_STMT); threadid = build_call_expr (threadid, 0); threadid = fold_convert (itype, threadid); threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE, true, GSI_SAME_STMT); n1 = fd->loop.n1; n2 = fd->loop.n2; step = fd->loop.step; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); tree chunk_size = fold_convert (itype, fd->chunk_size); chunk_size = omp_adjust_chunk_size (chunk_size, fd->simd_schedule); chunk_size = force_gimple_operand_gsi (&gsi, chunk_size, true, NULL_TREE, true, GSI_SAME_STMT); t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? 
-1 : 1)); t = fold_build2 (PLUS_EXPR, itype, step, t); t = fold_build2 (PLUS_EXPR, itype, t, n2); t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1)); if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, step)); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step); t = fold_convert (itype, t); n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); trip_var = create_tmp_reg (itype, ".trip"); if (gimple_in_ssa_p (cfun)) { trip_init = make_ssa_name (trip_var); trip_main = make_ssa_name (trip_var); trip_back = make_ssa_name (trip_var); } else { trip_init = trip_var; trip_main = trip_var; trip_back = trip_var; } gassign *assign_stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0)); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = fold_build2 (MULT_EXPR, itype, threadid, chunk_size); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR. */ gsi_remove (&gsi, true); gimple_stmt_iterator gsif = gsi; /* Iteration space partitioning goes in ITER_PART_BB. */ gsi = gsi_last_bb (iter_part_bb); t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads); t = fold_build2 (PLUS_EXPR, itype, t, threadid); t = fold_build2 (MULT_EXPR, itype, t, chunk_size); s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = fold_build2 (PLUS_EXPR, itype, s0, chunk_size); t = fold_build2 (MIN_EXPR, itype, t, n); e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = build2 (LT_EXPR, boolean_type_node, s0, n); gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING); /* Setup code for sequential iteration goes in SEQ_START_BB. 
*/ gsi = gsi_start_bb (seq_start_bb); tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL ? gimple_omp_parallel_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE) { innerc = find_lastprivate_looptemp (fd, innerc); if (innerc) { /* If needed (distribute parallel for with lastprivate), propagate down the total number of iterations. */ tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)), fd->loop.n2); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } } } t = fold_convert (itype, s0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) { t = fold_build_pointer_plus (n1, t); if (!POINTER_TYPE_P (TREE_TYPE (startvar)) && TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type)) t = fold_convert (signed_type_for (type), t); } else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (cond_var) { tree itype = TREE_TYPE (cond_var); /* For lastprivate(conditional:) itervar, we need some iteration counter that starts at unsigned non-zero and increases. 
Prefer as few IVs as possible, so if we can use startvar itself, use that, or startvar + constant (those would be incremented with step), and as last resort use the s0 + 1 incremented by 1. */ if (POINTER_TYPE_P (type) || TREE_CODE (n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, s0), build_int_cst (itype, 1)); else if (tree_int_cst_sgn (n1) == 1) t = fold_convert (itype, t); else { tree c = fold_convert (itype, n1); c = fold_build2 (MINUS_EXPR, itype, build_int_cst (itype, 1), c); t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, t), c); } t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (cond_var, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } t = fold_convert (itype, e0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) { t = fold_build_pointer_plus (n1, t); if (!POINTER_TYPE_P (TREE_TYPE (startvar)) && TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type)) t = fold_convert (signed_type_for (type), t); } else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e))) assign_stmt = gimple_build_assign (fd->loop.v, e); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Handle linear clause adjustments. 
*/ tree itercnt = NULL_TREE, itercntbias = NULL_TREE; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR) for (tree c = gimple_omp_for_clauses (fd->for_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { tree d = OMP_CLAUSE_DECL (c); bool is_ref = omp_is_reference (d); tree t = d, a, dest; if (is_ref) t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t); tree type = TREE_TYPE (t); if (POINTER_TYPE_P (type)) type = sizetype; dest = unshare_expr (t); tree v = create_tmp_var (TREE_TYPE (t), NULL); expand_omp_build_assign (&gsif, v, t); if (itercnt == NULL_TREE) { if (gimple_omp_for_combined_into_p (fd->for_stmt)) { itercntbias = fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1), fold_convert (itype, fd->loop.n1)); itercntbias = fold_build2 (EXACT_DIV_EXPR, itype, itercntbias, step); itercntbias = force_gimple_operand_gsi (&gsif, itercntbias, true, NULL_TREE, true, GSI_SAME_STMT); itercnt = fold_build2 (PLUS_EXPR, itype, itercntbias, s0); itercnt = force_gimple_operand_gsi (&gsi, itercnt, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } else itercnt = s0; } a = fold_build2 (MULT_EXPR, type, fold_convert (type, itercnt), fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c))); t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (dest, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, NULL, inner_stmt, startvar); if (!broken_loop) { /* The code controlling the sequential loop goes in CONT_BB, replacing the GIMPLE_OMP_CONTINUE. 
*/ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (cond_var) { tree itype = TREE_TYPE (cond_var); tree t2; if (POINTER_TYPE_P (type) || TREE_CODE (n1) != INTEGER_CST || fd->loop.cond_code != LT_EXPR) t2 = build_int_cst (itype, 1); else t2 = fold_convert (itype, step); t2 = fold_build2 (PLUS_EXPR, itype, cond_var, t2); t2 = force_gimple_operand_gsi (&gsi, t2, false, NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (cond_var, t2); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); } if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, step); else t = fold_build2 (PLUS_EXPR, type, vmain, step); if (DECL_P (vback) && TREE_ADDRESSABLE (vback)) t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); if (tree_int_cst_equal (fd->chunk_size, integer_one_node)) t = build2 (EQ_EXPR, boolean_type_node, build_int_cst (itype, 0), build_int_cst (itype, 1)); else t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); } /* Remove GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, NULL, cont_bb, body_bb); /* Trip update code goes into TRIP_UPDATE_BB. */ gsi = gsi_start_bb (trip_update_bb); t = build_int_cst (itype, 1); t = build2 (PLUS_EXPR, itype, trip_main, t); assign_stmt = gimple_build_assign (trip_back, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. 
*/ gsi = gsi_last_nondebug_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (gsi))) { t = gimple_omp_return_lhs (gsi_stmt (gsi)); if (fd->have_reductemp || fd->have_pointer_condtemp) { tree fn; if (t) fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL); else fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END); gcall *g = gimple_build_call (fn, 0); if (t) { gimple_call_set_lhs (g, t); if (fd->have_reductemp) gsi_insert_after (&gsi, gimple_build_assign (reductions, NOP_EXPR, t), GSI_SAME_STMT); } gsi_insert_after (&gsi, g, GSI_SAME_STMT); } else gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT); } else if (fd->have_pointer_condtemp) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT); gcall *g = gimple_build_call (fn, 0); gsi_insert_after (&gsi, g, GSI_SAME_STMT); } gsi_remove (&gsi, true); /* Connect the new blocks. */ find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE; find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE; if (!broken_loop) { se = find_edge (cont_bb, body_bb); if (se == NULL) { se = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (se->dest) == body_bb); } if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (se); se = NULL; } else if (fd->collapse > 1) { remove_edge (se); se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else se->flags = EDGE_TRUE_VALUE; find_edge (cont_bb, trip_update_bb)->flags = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU; redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb); } if (gimple_in_ssa_p (cfun)) { gphi_iterator psi; gphi *phi; edge re, ene; edge_var_map *vm; size_t i; gcc_assert (fd->collapse == 1 && !broken_loop); /* When we redirect the edge from trip_update_bb to iter_part_bb, we remove arguments of the phi nodes in fin_bb. We need to create appropriate phi nodes in iter_part_bb instead. 
*/ se = find_edge (iter_part_bb, fin_bb); re = single_succ_edge (trip_update_bb); vec<edge_var_map> *head = redirect_edge_var_map_vector (re); ene = single_succ_edge (entry_bb); psi = gsi_start_phis (fin_bb); for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm); gsi_next (&psi), ++i) { gphi *nphi; location_t locus; phi = psi.phi (); if (operand_equal_p (gimple_phi_arg_def (phi, 0), redirect_edge_var_map_def (vm), 0)) continue; t = gimple_phi_result (phi); gcc_assert (t == redirect_edge_var_map_result (vm)); if (!single_pred_p (fin_bb)) t = copy_ssa_name (t, phi); nphi = create_phi_node (t, iter_part_bb); t = PHI_ARG_DEF_FROM_EDGE (phi, se); locus = gimple_phi_arg_location_from_edge (phi, se); /* A special case -- fd->loop.v is not yet computed in iter_part_bb, we need to use vextra instead. */ if (t == fd->loop.v) t = vextra; add_phi_arg (nphi, t, ene, locus); locus = redirect_edge_var_map_location (vm); tree back_arg = redirect_edge_var_map_def (vm); add_phi_arg (nphi, back_arg, re, locus); edge ce = find_edge (cont_bb, body_bb); if (ce == NULL) { ce = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (ce->dest) == body_bb); ce = single_succ_edge (ce->dest); } gphi *inner_loop_phi = find_phi_with_arg_on_edge (back_arg, ce); gcc_assert (inner_loop_phi != NULL); add_phi_arg (inner_loop_phi, gimple_phi_result (nphi), find_edge (seq_start_bb, body_bb), locus); if (!single_pred_p (fin_bb)) add_phi_arg (phi, gimple_phi_result (nphi), se, locus); } gcc_assert (gsi_end_p (psi) && (head == NULL || i == head->length ())); redirect_edge_var_map_clear (re); if (single_pred_p (fin_bb)) while (1) { psi = gsi_start_phis (fin_bb); if (gsi_end_p (psi)) break; remove_phi_node (&psi, false); } /* Make phi node for trip. 
*/ phi = create_phi_node (trip_main, iter_part_bb); add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb), UNKNOWN_LOCATION); add_phi_arg (phi, trip_init, single_succ_edge (entry_bb), UNKNOWN_LOCATION); } if (!broken_loop) set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb); set_immediate_dominator (CDI_DOMINATORS, iter_part_bb, recompute_dominator (CDI_DOMINATORS, iter_part_bb)); set_immediate_dominator (CDI_DOMINATORS, fin_bb, recompute_dominator (CDI_DOMINATORS, fin_bb)); set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, recompute_dominator (CDI_DOMINATORS, seq_start_bb)); set_immediate_dominator (CDI_DOMINATORS, body_bb, recompute_dominator (CDI_DOMINATORS, body_bb)); if (!broken_loop) { class loop *loop = body_bb->loop_father; class loop *trip_loop = alloc_loop (); trip_loop->header = iter_part_bb; trip_loop->latch = trip_update_bb; add_loop (trip_loop, iter_part_bb->loop_father); if (loop != entry_bb->loop_father) { gcc_assert (loop->header == body_bb); gcc_assert (loop->latch == region->cont || single_pred (loop->latch) == region->cont); trip_loop->inner = loop; return; } if (!gimple_omp_for_combined_p (fd->for_stmt)) { loop = alloc_loop (); loop->header = body_bb; if (collapse_bb == NULL) loop->latch = cont_bb; add_loop (loop, trip_loop); } } } /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing loop. Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode V = N1; goto L1; L0: BODY; V += STEP; L1: if (V cond N2) goto L0; else goto L2; L2: For collapsed loops, emit the outer loops as scalar and only try to vectorize the innermost loop. 
*/

static void
expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
{
  tree type, t;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  gcond *cond_stmt;
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;
  int safelen_int = INT_MAX;
  bool dont_vectorize = false;
  /* Look up the clauses that influence how the simd loop is expanded:
     safelen/simdlen limits, the internal _simduid_ marker for the
     vectorizer, an if() clause and the internal _condtemp_ used for
     lastprivate(conditional:).  */
  tree safelen = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				  OMP_CLAUSE_SAFELEN);
  tree simduid = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				  OMP_CLAUSE__SIMDUID_);
  tree ifc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
			      OMP_CLAUSE_IF);
  tree simdlen = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				  OMP_CLAUSE_SIMDLEN);
  tree condtemp = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				   OMP_CLAUSE__CONDTEMP_);
  tree n1, n2;
  tree cond_var = condtemp ? OMP_CLAUSE_DECL (condtemp) : NULL_TREE;

  /* Reduce the safelen clause to an int; 0 means "no vectorization
     benefit" (safelen(1) or a non-constant poly value).  */
  if (safelen)
    {
      poly_uint64 val;
      safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
      if (!poly_int_tree_p (safelen, &val))
	safelen_int = 0;
      else
	safelen_int = MIN (constant_lower_bound (val), INT_MAX);
      if (safelen_int == 1)
	safelen_int = 0;
    }
  /* if(false) or simdlen(1) means the loop should not be vectorized.  */
  if ((ifc && integer_zerop (OMP_CLAUSE_IF_EXPR (ifc)))
      || (simdlen && integer_onep (OMP_CLAUSE_SIMDLEN_EXPR (simdlen))))
    {
      safelen_int = 0;
      dont_vectorize = true;
    }
  type = TREE_TYPE (fd->loop.v);
  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  /* Identify the L0/L1/L2 blocks of the pseudocode shape documented
     above the function.  For a broken loop (no continue) L1 has to be
     materialized by splitting the branch edge.  */
  l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
  if (!broken_loop)
    {
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
      l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
      l2_bb = BRANCH_EDGE (entry_bb)->dest;
    }
  else
    {
      BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
      l1_bb = split_edge (BRANCH_EDGE (entry_bb));
      l2_bb = single_succ (l1_bb);
    }
  exit_bb = region->exit;
  l2_dom_bb = NULL;

  gsi = gsi_last_nondebug_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  /* Not needed in SSA form right now.  */
  gcc_assert (!gimple_in_ssa_p (cfun));
  if (fd->collapse > 1
      && (gimple_omp_for_combined_into_p (fd->for_stmt)
	  || broken_loop))
    {
      int first_zero_iter = -1, dummy = -1;
      basic_block zero_iter_bb = l2_bb, dummy_bb = NULL;

      counts = XALLOCAVEC (tree, fd->collapse);
      expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
				  zero_iter_bb, first_zero_iter,
				  dummy_bb, dummy, l2_dom_bb);
    }
  if (l2_dom_bb == NULL)
    l2_dom_bb = l1_bb;

  n1 = fd->loop.n1;
  n2 = fd->loop.n2;
  /* When combined into an outer worksharing construct, the actual
     iteration bounds arrive through the first two _looptemp_ clauses.  */
  if (gimple_omp_for_combined_into_p (fd->for_stmt))
    {
      tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				     OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      n1 = OMP_CLAUSE_DECL (innerc);
      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      n2 = OMP_CLAUSE_DECL (innerc);
    }
  tree step = fd->loop.step;

  /* SIMT (e.g. offloaded) lowering: each lane starts at n1 + lane*step
     and advances by step*vf.  Only worthwhile when safelen allows more
     than one concurrent iteration.  */
  bool is_simt = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				  OMP_CLAUSE__SIMT_);
  if (is_simt)
    {
      cfun->curr_properties &= ~PROP_gimple_lomp_dev;
      is_simt = safelen_int > 1;
    }
  tree simt_lane = NULL_TREE, simt_maxlane = NULL_TREE;
  if (is_simt)
    {
      simt_lane = create_tmp_var (unsigned_type_node);
      gimple *g = gimple_build_call_internal (IFN_GOMP_SIMT_LANE, 0);
      gimple_call_set_lhs (g, simt_lane);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      tree offset = fold_build2 (MULT_EXPR, TREE_TYPE (step), step,
				 fold_convert (TREE_TYPE (step), simt_lane));
      n1 = fold_convert (type, n1);
      if (POINTER_TYPE_P (type))
	n1 = fold_build_pointer_plus (n1, offset);
      else
	n1 = fold_build2 (PLUS_EXPR, type, n1, fold_convert (type, offset));

      /* Collapsed loops not handled for SIMT yet: limit to one lane only.  */
      if (fd->collapse > 1)
	simt_maxlane = build_one_cst (unsigned_type_node);
      else if (safelen_int < omp_max_simt_vf ())
	simt_maxlane = build_int_cst (unsigned_type_node, safelen_int);
      tree vf
	= build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_GOMP_SIMT_VF,
					unsigned_type_node, 0);
      if (simt_maxlane)
	vf = fold_build2 (MIN_EXPR, unsigned_type_node, vf, simt_maxlane);
      vf = fold_convert (TREE_TYPE (step), vf);
      step = fold_build2 (MULT_EXPR, TREE_TYPE (step), step, vf);
    }

  tree n2var = NULL_TREE;
  tree n2v = NULL_TREE;
  tree *nonrect_bounds = NULL;
  tree min_arg1 = NULL_TREE, min_arg2 = NULL_TREE;
  if (fd->collapse > 1)
    {
      if (broken_loop || gimple_omp_for_combined_into_p (fd->for_stmt))
	{
	  if (fd->non_rect)
	    {
	      nonrect_bounds = XALLOCAVEC (tree, fd->last_nonrect + 1);
	      memset (nonrect_bounds, 0,
		      sizeof (tree) * (fd->last_nonrect + 1));
	    }
	  expand_omp_build_assign (&gsi, fd->loop.v, fold_convert (type, n1));
	  gcc_assert (entry_bb == gsi_bb (gsi));
	  gcc_assert (fd->for_stmt == gsi_stmt (gsi));
	  gsi_prev (&gsi);
	  entry_bb = split_block (entry_bb, gsi_stmt (gsi))->dest;
	  expand_omp_for_init_vars (fd, &gsi, counts, nonrect_bounds,
				    NULL, n1);
	  gsi = gsi_for_stmt (fd->for_stmt);
	}
      if (broken_loop)
	;
      else if (gimple_omp_for_combined_into_p (fd->for_stmt))
	{
	  /* Compute in n2var the limit for the first innermost loop,
	     i.e. fd->loop.v + MIN (n2 - fd->loop.v, cnt)
	     where cnt is how many iterations would the loop have if
	     all further iterations were assigned to the current task.  */
	  n2var = create_tmp_var (type);
	  i = fd->collapse - 1;
	  tree itype = TREE_TYPE (fd->loops[i].v);
	  if (POINTER_TYPE_P (itype))
	    itype = signed_type_for (itype);
	  /* Standard iteration count formula:
	     (n2 - v + step +- 1) / step, negated for unsigned downward
	     loops so the TRUNC_DIV rounds in the right direction.  */
	  t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				     ? -1 : 1));
	  t = fold_build2 (PLUS_EXPR, itype,
			   fold_convert (itype, fd->loops[i].step), t);
	  t = fold_build2 (PLUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n2));
	  if (fd->loops[i].m2)
	    {
	      /* Non-rectangular bound: n2 + m2 * outer-iterator.  */
	      tree t2 = fold_convert (itype,
				      fd->loops[i - fd->loops[i].outer].v);
	      tree t3 = fold_convert (itype, fd->loops[i].m2);
	      t2 = fold_build2 (MULT_EXPR, TREE_TYPE (t), t2, t3);
	      t = fold_build2 (PLUS_EXPR, itype, t, t2);
	    }
	  t = fold_build2 (MINUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].v));
	  if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2 (TRUNC_DIV_EXPR, itype,
			     fold_build1 (NEGATE_EXPR, itype, t),
			     fold_build1 (NEGATE_EXPR, itype,
					  fold_convert (itype,
							fd->loops[i].step)));
	  else
	    t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			     fold_convert (itype, fd->loops[i].step));
	  t = fold_convert (type, t);
	  tree t2 = fold_build2 (MINUS_EXPR, type, n2, n1);
	  min_arg1 = create_tmp_var (type);
	  expand_omp_build_assign (&gsi, min_arg1, t2);
	  min_arg2 = create_tmp_var (type);
	  expand_omp_build_assign (&gsi, min_arg2, t);
	}
      else
	{
	  if (TREE_CODE (n2) == INTEGER_CST)
	    {
	      /* Indicate for lastprivate handling that at least one
		 iteration has been performed, without wasting runtime.  */
	      if (integer_nonzerop (n2))
		expand_omp_build_assign (&gsi, fd->loop.v,
					 fold_convert (type, n2));
	      else
		/* Indicate that no iteration has been performed.  */
		expand_omp_build_assign (&gsi, fd->loop.v,
					 build_one_cst (type));
	    }
	  else
	    {
	      expand_omp_build_assign (&gsi, fd->loop.v,
				       build_zero_cst (type));
	      expand_omp_build_assign (&gsi, n2, build_one_cst (type));
	    }
	  for (i = 0; i < fd->collapse; i++)
	    {
	      t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
	      if (fd->loops[i].m1)
		{
		  /* Non-rectangular lower bound: n1 + m1 * outer-iterator.  */
		  tree t2
		    = fold_convert (TREE_TYPE (t),
				    fd->loops[i - fd->loops[i].outer].v);
		  tree t3 = fold_convert (TREE_TYPE (t), fd->loops[i].m1);
		  t2 = fold_build2 (MULT_EXPR, TREE_TYPE (t), t2, t3);
		  t = fold_build2 (PLUS_EXPR, TREE_TYPE (t), t, t2);
		}
	      expand_omp_build_assign (&gsi, fd->loops[i].v, t);
	      /* For normal non-combined collapsed loops just initialize
		 the outermost iterator in the entry_bb.  */
	      if (!broken_loop)
		break;
	    }
	}
    }
  else
    expand_omp_build_assign (&gsi, fd->loop.v, fold_convert (type, n1));
  tree altv = NULL_TREE, altn2 = NULL_TREE;
  if (fd->collapse == 1
      && !broken_loop
      && TREE_CODE (fd->loops[0].step) != INTEGER_CST)
    {
      /* The vectorizer currently punts on loops with non-constant steps
	 for the main IV (can't compute number of iterations and gives up
	 because of that).  As for OpenMP loops it is always possible to
	 compute the number of iterations upfront, use an alternate IV
	 as the loop iterator:
	 altn2 = n1 < n2 ? (n2 - n1 + step - 1) / step : 0;
	 for (i = n1, altv = 0; altv < altn2; altv++, i += step)  */
      altv = create_tmp_var (unsigned_type_for (TREE_TYPE (fd->loops[0].v)));
      expand_omp_build_assign (&gsi, altv, build_zero_cst (TREE_TYPE (altv)));
      tree itype = TREE_TYPE (fd->loop.v);
      if (POINTER_TYPE_P (itype))
	itype = signed_type_for (itype);
      t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR
				 ? -1 : 1));
      t = fold_build2 (PLUS_EXPR, itype,
		       fold_convert (itype, fd->loop.step), t);
      t = fold_build2 (PLUS_EXPR, itype, t, fold_convert (itype, n2));
      t = fold_build2 (MINUS_EXPR, itype, t,
		       fold_convert (itype, fd->loop.v));
      if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
	t = fold_build2 (TRUNC_DIV_EXPR, itype,
			 fold_build1 (NEGATE_EXPR, itype, t),
			 fold_build1 (NEGATE_EXPR, itype,
				      fold_convert (itype, fd->loop.step)));
      else
	t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			 fold_convert (itype, fd->loop.step));
      t = fold_convert (TREE_TYPE (altv), t);
      altn2 = create_tmp_var (TREE_TYPE (altv));
      expand_omp_build_assign (&gsi, altn2, t);
      tree t2 = fold_convert (TREE_TYPE (fd->loop.v), n2);
      t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      /* Clamp altn2 to zero when the loop condition fails up front.  */
      t2 = fold_build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t2);
      gassign *g = gimple_build_assign (altn2, COND_EXPR, t2, altn2,
					build_zero_cst (TREE_TYPE (altv)));
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
    }
  else if (fd->collapse > 1
	   && !broken_loop
	   && !gimple_omp_for_combined_into_p (fd->for_stmt)
	   && TREE_CODE (fd->loops[fd->collapse - 1].step) != INTEGER_CST)
    {
      /* Same alternate-IV trick for the innermost of collapsed loops;
	 the variables are initialized later, inside the collapse
	 expansion below.  */
      altv = create_tmp_var (unsigned_type_for (TREE_TYPE (fd->loops[0].v)));
      altn2 = create_tmp_var (TREE_TYPE (altv));
    }
  if (cond_var)
    {
      /* Initialize the lastprivate(conditional:) counter; it can reuse
	 the IV value directly only for a constant positive n1 with a
	 LT condition on a non-pointer type.  */
      if (POINTER_TYPE_P (type)
	  || TREE_CODE (n1) != INTEGER_CST
	  || fd->loop.cond_code != LT_EXPR
	  || tree_int_cst_sgn (n1) != 1)
	expand_omp_build_assign (&gsi, cond_var,
				 build_one_cst (TREE_TYPE (cond_var)));
      else
	expand_omp_build_assign (&gsi, cond_var,
				 fold_convert (TREE_TYPE (cond_var), n1));
    }

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  if (!broken_loop)
    {
      /* Code to control the increment goes in the CONT_BB.  */
      gsi = gsi_last_nondebug_bb (cont_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);

      if (fd->collapse == 1
	  || gimple_omp_for_combined_into_p (fd->for_stmt))
	{
	  if (POINTER_TYPE_P (type))
	    t = fold_build_pointer_plus (fd->loop.v, step);
	  else
	    t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step);
	  expand_omp_build_assign (&gsi, fd->loop.v, t);
	}
      else if (TREE_CODE (n2) != INTEGER_CST)
	expand_omp_build_assign (&gsi, fd->loop.v, build_one_cst (type));
      if (altv)
	{
	  t = fold_build2 (PLUS_EXPR, TREE_TYPE (altv), altv,
			   build_one_cst (TREE_TYPE (altv)));
	  expand_omp_build_assign (&gsi, altv, t);
	}

      if (fd->collapse > 1)
	{
	  /* Step the innermost collapsed iterator.  */
	  i = fd->collapse - 1;
	  if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
	    {
	      t = fold_convert (sizetype, fd->loops[i].step);
	      t = fold_build_pointer_plus (fd->loops[i].v, t);
	    }
	  else
	    {
	      t = fold_convert (TREE_TYPE (fd->loops[i].v),
				fd->loops[i].step);
	      t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
			       fd->loops[i].v, t);
	    }
	  expand_omp_build_assign (&gsi, fd->loops[i].v, t);
	}
      if (cond_var)
	{
	  /* Advance the conditional-lastprivate counter in lockstep.  */
	  if (POINTER_TYPE_P (type)
	      || TREE_CODE (n1) != INTEGER_CST
	      || fd->loop.cond_code != LT_EXPR
	      || tree_int_cst_sgn (n1) != 1)
	    t = fold_build2 (PLUS_EXPR, TREE_TYPE (cond_var), cond_var,
			     build_one_cst (TREE_TYPE (cond_var)));
	  else
	    t = fold_build2 (PLUS_EXPR, TREE_TYPE (cond_var), cond_var,
			     fold_convert (TREE_TYPE (cond_var), step));
	  expand_omp_build_assign (&gsi, cond_var, t);
	}

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);
    }

  /* Emit the condition in L1_BB.  */
  gsi = gsi_start_bb (l1_bb);
  if (altv)
    t = build2 (LT_EXPR, boolean_type_node, altv, altn2);
  else if (fd->collapse > 1
	   && !gimple_omp_for_combined_into_p (fd->for_stmt)
	   && !broken_loop)
    {
      i = fd->collapse - 1;
      tree itype = TREE_TYPE (fd->loops[i].v);
      if (fd->loops[i].m2)
	t = n2v = create_tmp_var (itype);
      else
	t = fold_convert (itype, fd->loops[i].n2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    false, GSI_CONTINUE_LINKING);
      tree v = fd->loops[i].v;
      if (DECL_P (v) && TREE_ADDRESSABLE (v))
	v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
      t = build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
    }
  else
    {
      if (fd->collapse > 1 && !broken_loop)
	t = n2var;
      else
	t = fold_convert (type, n2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    false, GSI_CONTINUE_LINKING);
      tree v = fd->loop.v;
      if (DECL_P (v) && TREE_ADDRESSABLE (v))
	v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
      t = build2 (fd->loop.cond_code, boolean_type_node, v, t);
    }
  cond_stmt = gimple_build_cond_empty (t);
  gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
  if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
		 NULL, NULL)
      || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
		    NULL, NULL))
    {
      gsi = gsi_for_stmt (cond_stmt);
      gimple_regimplify_operands (cond_stmt, &gsi);
    }

  /* Add 'V -= STEP * (SIMT_VF - 1)' after the loop.  */
  if (is_simt)
    {
      gsi = gsi_start_bb (l2_bb);
      step = fold_build2 (MINUS_EXPR, TREE_TYPE (step), fd->loop.step, step);
      if (POINTER_TYPE_P (type))
	t = fold_build_pointer_plus (fd->loop.v, step);
      else
	t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step);
      expand_omp_build_assign (&gsi, fd->loop.v, t);
    }

  /* Remove GIMPLE_OMP_RETURN.  */
  gsi = gsi_last_nondebug_bb (exit_bb);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  remove_edge (FALLTHRU_EDGE (entry_bb));

  if (!broken_loop)
    {
      remove_edge (BRANCH_EDGE (entry_bb));
      make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);

      e = BRANCH_EDGE (l1_bb);
      ne = FALLTHRU_EDGE (l1_bb);
      e->flags = EDGE_TRUE_VALUE;
    }
  else
    {
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      ne = single_succ_edge (l1_bb);
      e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);

    }
  ne->flags = EDGE_FALSE_VALUE;
  /* Assume the loop body is the common (7/8) path.  */
  e->probability = profile_probability::guessed_always ().apply_scale (7, 8);
  ne->probability = e->probability.invert ();

  set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);

  if (simt_maxlane)
    {
      /* Guard the whole loop with 'simt_lane < simt_maxlane'; lanes past
	 the limit jump straight to L2.  */
      cond_stmt = gimple_build_cond (LT_EXPR, simt_lane, simt_maxlane,
				     NULL_TREE, NULL_TREE);
      gsi = gsi_last_bb (entry_bb);
      gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
      make_edge (entry_bb, l2_bb, EDGE_FALSE_VALUE);
      FALLTHRU_EDGE (entry_bb)->flags = EDGE_TRUE_VALUE;
      FALLTHRU_EDGE (entry_bb)->probability
	= profile_probability::guessed_always ().apply_scale (7, 8);
      BRANCH_EDGE (entry_bb)->probability
	= FALLTHRU_EDGE (entry_bb)->probability.invert ();
      l2_dom_bb = entry_bb;
    }
  set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);

  if (!broken_loop && fd->collapse > 1)
    {
      /* Build the control flow for the outer collapsed loops, innermost
	 first: each outer level gets a step block, a condition block and
	 an init block re-initializing the next-inner iterator.  */
      basic_block last_bb = l1_bb;
      basic_block init_bb = NULL;
      for (i = fd->collapse - 2; i >= 0; i--)
	{
	  tree nextn2v = NULL_TREE;
	  /* Split the exit (false) edge of the inner level's condition.  */
	  if (EDGE_SUCC (last_bb, 0)->flags & EDGE_FALSE_VALUE)
	    e = EDGE_SUCC (last_bb, 0);
	  else
	    e = EDGE_SUCC (last_bb, 1);
	  basic_block bb = split_edge (e);
	  if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
	    {
	      t = fold_convert (sizetype, fd->loops[i].step);
	      t = fold_build_pointer_plus (fd->loops[i].v, t);
	    }
	  else
	    {
	      t = fold_convert (TREE_TYPE (fd->loops[i].v),
				fd->loops[i].step);
	      t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
			       fd->loops[i].v, t);
	    }
	  gsi = gsi_after_labels (bb);
	  expand_omp_build_assign (&gsi, fd->loops[i].v, t);

	  bb = split_block (bb, last_stmt (bb))->dest;
	  gsi = gsi_start_bb (bb);
	  tree itype = TREE_TYPE (fd->loops[i].v);
	  if (fd->loops[i].m2)
	    t = nextn2v = create_tmp_var (itype);
	  else
	    t = fold_convert (itype, fd->loops[i].n2);
	  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  tree v = fd->loops[i].v;
	  if (DECL_P (v) && TREE_ADDRESSABLE (v))
	    v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
					  false, GSI_CONTINUE_LINKING);
	  t = build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
	  cond_stmt = gimple_build_cond_empty (t);
	  gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
	  if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
			 expand_omp_regimplify_p, NULL, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
			    expand_omp_regimplify_p, NULL, NULL))
	    {
	      gsi = gsi_for_stmt (cond_stmt);
	      gimple_regimplify_operands (cond_stmt, &gsi);
	    }
	  ne = single_succ_edge (bb);
	  ne->flags = EDGE_FALSE_VALUE;

	  init_bb = create_empty_bb (bb);
	  set_immediate_dominator (CDI_DOMINATORS, init_bb, bb);
	  add_bb_to_loop (init_bb, bb->loop_father);
	  e = make_edge (bb, init_bb, EDGE_TRUE_VALUE);
	  e->probability
	    = profile_probability::guessed_always ().apply_scale (7, 8);
	  ne->probability = e->probability.invert ();

	  /* In init_bb, reset the next-inner iterator to its (possibly
	     non-rectangular) lower bound.  */
	  gsi = gsi_after_labels (init_bb);
	  t = fold_convert (TREE_TYPE (fd->loops[i + 1].v),
			    fd->loops[i + 1].n1);
	  if (fd->loops[i + 1].m1)
	    {
	      tree t2 = fold_convert (TREE_TYPE (t),
				      fd->loops[i + 1
						- fd->loops[i + 1].outer].v);
	      tree t3 = fold_convert (TREE_TYPE (t), fd->loops[i + 1].m1);
	      t2 = fold_build2 (MULT_EXPR, TREE_TYPE (t), t2, t3);
	      t = fold_build2 (PLUS_EXPR, TREE_TYPE (t), t, t2);
	    }
	  expand_omp_build_assign (&gsi, fd->loops[i + 1].v, t);
	  if (fd->loops[i + 1].m2)
	    {
	      /* Recompute the next-inner non-rectangular upper bound.  */
	      if (i + 2 == fd->collapse && (n2var || altv))
		{
		  gcc_assert (n2v == NULL_TREE);
		  n2v = create_tmp_var (TREE_TYPE (fd->loops[i + 1].v));
		}
	      t = fold_convert (TREE_TYPE (fd->loops[i + 1].v),
				fd->loops[i + 1].n2);
	      tree t2 = fold_convert (TREE_TYPE (t),
				      fd->loops[i + 1
						- fd->loops[i + 1].outer].v);
	      tree t3 = fold_convert (TREE_TYPE (t), fd->loops[i + 1].m2);
	      t2 = fold_build2 (MULT_EXPR, TREE_TYPE (t), t2, t3);
	      t = fold_build2 (PLUS_EXPR, TREE_TYPE (t), t, t2);
	      expand_omp_build_assign (&gsi, n2v, t);
	    }
	  if (i + 2 == fd->collapse && n2var)
	    {
	      /* For composite simd, n2 is the first iteration the current
		 task shouldn't already handle, so we effectively want to use
		 for (V3 = N31; V < N2 && V3 < N32; V++, V3 += STEP3)
		 as the vectorized loop.  Except the vectorizer will not
		 vectorize that, so instead compute N2VAR as
		 N2VAR = V + MIN (N2 - V, COUNTS3) and use
		 for (V3 = N31; V < N2VAR; V++, V3 += STEP3)
		 as the loop to vectorize.  */
	      tree t2 = fold_build2 (MINUS_EXPR, type, n2, fd->loop.v);
	      if (fd->loops[i + 1].m1 || fd->loops[i + 1].m2)
		{
		  /* Non-rectangular innermost loop: the iteration count
		     must be recomputed here rather than taken from
		     counts[].  */
		  t = build_int_cst (itype, (fd->loops[i + 1].cond_code
					     == LT_EXPR ? -1 : 1));
		  t = fold_build2 (PLUS_EXPR, itype,
				   fold_convert (itype,
						 fd->loops[i + 1].step), t);
		  if (fd->loops[i + 1].m2)
		    t = fold_build2 (PLUS_EXPR, itype, t, n2v);
		  else
		    t = fold_build2 (PLUS_EXPR, itype, t,
				     fold_convert (itype,
						   fd->loops[i + 1].n2));
		  t = fold_build2 (MINUS_EXPR, itype, t,
				   fold_convert (itype, fd->loops[i + 1].v));
		  tree step = fold_convert (itype, fd->loops[i + 1].step);
		  if (TYPE_UNSIGNED (itype)
		      && fd->loops[i + 1].cond_code == GT_EXPR)
		    t = fold_build2 (TRUNC_DIV_EXPR, itype,
				     fold_build1 (NEGATE_EXPR, itype, t),
				     fold_build1 (NEGATE_EXPR, itype, step));
		  else
		    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
		  t = fold_convert (type, t);
		}
	      else
		t = counts[i + 1];
	      expand_omp_build_assign (&gsi, min_arg1, t2);
	      expand_omp_build_assign (&gsi, min_arg2, t);
	      e = split_block (init_bb, last_stmt (init_bb));
	      gsi = gsi_after_labels (e->dest);
	      init_bb = e->dest;
	      remove_edge (FALLTHRU_EDGE (entry_bb));
	      make_edge (entry_bb, init_bb, EDGE_FALLTHRU);
	      set_immediate_dominator (CDI_DOMINATORS, init_bb, entry_bb);
	      set_immediate_dominator (CDI_DOMINATORS, l1_bb, init_bb);
	      t = fold_build2 (MIN_EXPR, type, min_arg1, min_arg2);
	      t = fold_build2 (PLUS_EXPR, type, fd->loop.v, t);
	      expand_omp_build_assign (&gsi, n2var, t);
	    }
	  if (i + 2 == fd->collapse && altv)
	    {
	      /* The vectorizer currently punts on loops with
		 non-constant steps for the main IV (can't compute number
		 of iterations and gives up because of that).  As for
		 OpenMP loops it is always possible to compute the number
		 of iterations upfront, use an alternate IV as the loop
		 iterator.  */
	      expand_omp_build_assign (&gsi, altv,
				       build_zero_cst (TREE_TYPE (altv)));
	      tree itype = TREE_TYPE (fd->loops[i + 1].v);
	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (fd->loops[i + 1].cond_code == LT_EXPR
					 ? -1 : 1));
	      t = fold_build2 (PLUS_EXPR, itype,
			       fold_convert (itype, fd->loops[i + 1].step), t);
	      t = fold_build2 (PLUS_EXPR, itype, t,
			       fold_convert (itype,
					     fd->loops[i + 1].m2
					     ? n2v : fd->loops[i + 1].n2));
	      t = fold_build2 (MINUS_EXPR, itype, t,
			       fold_convert (itype, fd->loops[i + 1].v));
	      tree step = fold_convert (itype, fd->loops[i + 1].step);
	      if (TYPE_UNSIGNED (itype)
		  && fd->loops[i + 1].cond_code == GT_EXPR)
		t = fold_build2 (TRUNC_DIV_EXPR, itype,
				 fold_build1 (NEGATE_EXPR, itype, t),
				 fold_build1 (NEGATE_EXPR, itype, step));
	      else
		t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
	      t = fold_convert (TREE_TYPE (altv), t);
	      expand_omp_build_assign (&gsi, altn2, t);
	      tree t2 = fold_convert (TREE_TYPE (fd->loops[i + 1].v),
				      fd->loops[i + 1].m2
				      ? n2v : fd->loops[i + 1].n2);
	      t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
					     true, GSI_SAME_STMT);
	      /* Clamp altn2 to zero when the condition fails up front.  */
	      t2 = fold_build2 (fd->loops[i + 1].cond_code, boolean_type_node,
				fd->loops[i + 1].v, t2);
	      gassign *g
		= gimple_build_assign (altn2, COND_EXPR, t2, altn2,
				       build_zero_cst (TREE_TYPE (altv)));
	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	    }
	  n2v = nextn2v;

	  make_edge (init_bb, last_bb, EDGE_FALLTHRU);
	  if (!gimple_omp_for_combined_into_p (fd->for_stmt))
	    {
	      e = find_edge (entry_bb, last_bb);
	      redirect_edge_succ (e, bb);
	      set_immediate_dominator (CDI_DOMINATORS, bb, entry_bb);
	      set_immediate_dominator (CDI_DOMINATORS, last_bb, init_bb);
	    }

	  last_bb = bb;
	}
    }
  if (!broken_loop)
    {
      /* Register the loop in the loop tree and record the simd
	 properties (safelen, simduid, vectorization hints) on it.  */
      class loop *loop = alloc_loop ();
      loop->header = l1_bb;
      loop->latch = cont_bb;
      add_loop (loop, l1_bb->loop_father);
      loop->safelen = safelen_int;
      if (simduid)
	{
	  loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
	  cfun->has_simduid_loops = true;
	}
      /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
	 the loop.  */
      if ((flag_tree_loop_vectorize
	   || !global_options_set.x_flag_tree_loop_vectorize)
	  && flag_tree_loop_optimize
	  && loop->safelen > 1)
	{
	  loop->force_vectorize = true;
	  if (simdlen && tree_fits_uhwi_p (OMP_CLAUSE_SIMDLEN_EXPR (simdlen)))
	    {
	      unsigned HOST_WIDE_INT v
		= tree_to_uhwi (OMP_CLAUSE_SIMDLEN_EXPR (simdlen));
	      if (v < INT_MAX && v <= (unsigned HOST_WIDE_INT) loop->safelen)
		loop->simdlen = v;
	    }
	  cfun->has_force_vectorize_loops = true;
	}
      else if (dont_vectorize)
	loop->dont_vectorize = true;
    }
  else if (simduid)
    cfun->has_simduid_loops = true;
}

/* Taskloop construct is represented after gimplification with
   two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched
   in between them.  This routine expands the outer GIMPLE_OMP_FOR,
   which should just compute all the needed loop temporaries
   for GIMPLE_OMP_TASK.
*/

static void
expand_omp_taskloop_for_outer (struct omp_region *region,
			       struct omp_for_data *fd,
			       gimple *inner_stmt)
{
  tree type, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb;
  gimple_stmt_iterator gsi;
  gassign *assign_stmt;
  tree *counts = NULL;
  int i;

  /* The outer taskloop GIMPLE_OMP_FOR always wraps a GIMPLE_OMP_TASK
     carrying the _looptemp_ clauses we fill in below.  */
  gcc_assert (inner_stmt);
  gcc_assert (region->cont);
  gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_TASK
	      && gimple_omp_task_taskloop_p (inner_stmt));
  type = TREE_TYPE (fd->loop.v);

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      /* Bias unless both endpoints are compile-time constants with the
	 same sign, in which case the unsigned iteration space is already
	 safe to use directly.  */
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  exit_bb = region->exit;

  gsi = gsi_last_nondebug_bb (entry_bb);
  gimple *for_stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (for_stmt) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      int first_zero_iter = -1, dummy = -1;
      basic_block zero_iter_bb = NULL, dummy_bb = NULL, l2_dom_bb = NULL;

      counts = XALLOCAVEC (tree, fd->collapse);
      expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
				  zero_iter_bb, first_zero_iter,
				  dummy_bb, dummy, l2_dom_bb);

      if (zero_iter_bb)
	{
	  /* Some counts[i] vars might be uninitialized if
	     some loop has zero iterations.  But the body shouldn't
	     be executed in that case, so just avoid uninit warnings.  */
	  for (i = first_zero_iter; i < fd->collapse; i++)
	    if (SSA_VAR_P (counts[i]))
	      TREE_NO_WARNING (counts[i]) = 1;
	  gsi_prev (&gsi);
	  edge e = split_block (entry_bb, gsi_stmt (gsi));
	  entry_bb = e->dest;
	  make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
	  gsi = gsi_last_bb (entry_bb);
	  set_immediate_dominator (CDI_DOMINATORS, entry_bb,
				   get_immediate_dominator (CDI_DOMINATORS,
							    zero_iter_bb));
	}
    }

  tree t0, t1;
  t1 = fd->loop.n2;
  t0 = fd->loop.n1;
  if (POINTER_TYPE_P (TREE_TYPE (t0))
      && TYPE_PRECISION (TREE_TYPE (t0))
	 != TYPE_PRECISION (fd->iter_type))
    {
      /* Avoid casting pointers to integer of a different size.  */
      tree itype = signed_type_for (type);
      t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
      t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
    }
  else
    {
      t1 = fold_convert (fd->iter_type, t1);
      t0 = fold_convert (fd->iter_type, t0);
    }
  if (bias)
    {
      t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
      t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
    }

  /* The first two _looptemp_ clauses on the inner GIMPLE_OMP_TASK hold
     the start and end temporaries the task will receive.  */
  tree innerc = omp_find_clause (gimple_omp_task_clauses (inner_stmt),
				 OMP_CLAUSE__LOOPTEMP_);
  gcc_assert (innerc);
  tree startvar = OMP_CLAUSE_DECL (innerc);
  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
  gcc_assert (innerc);
  tree endvar = OMP_CLAUSE_DECL (innerc);
  if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
    {
      innerc = find_lastprivate_looptemp (fd, innerc);
      if (innerc)
	{
	  /* If needed (inner taskloop has lastprivate clause), propagate
	     down the total number of iterations.  */
	  tree t = force_gimple_operand_gsi (&gsi, fd->loop.n2, false,
					     NULL_TREE, false,
					     GSI_CONTINUE_LINKING);
	  assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
	  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
	}
    }

  /* Assign the (possibly biased) bounds into the task's temporaries.  */
  t0 = force_gimple_operand_gsi (&gsi, t0, false, NULL_TREE, false,
				 GSI_CONTINUE_LINKING);
  assign_stmt = gimple_build_assign (startvar, t0);
  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);

  t1 = force_gimple_operand_gsi (&gsi, t1, false, NULL_TREE, false,
				 GSI_CONTINUE_LINKING);
  assign_stmt = gimple_build_assign (endvar, t1);
  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
  if (fd->collapse > 1)
    expand_omp_for_init_vars (fd, &gsi, counts, NULL, inner_stmt, startvar);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi = gsi_for_stmt (for_stmt);
  gsi_remove (&gsi, true);

  /* Likewise drop the now-redundant OMP continue/return markers.  */
  gsi = gsi_last_nondebug_bb (cont_bb);
  gsi_remove (&gsi, true);

  gsi = gsi_last_nondebug_bb (exit_bb);
  gsi_remove (&gsi, true);

  /* The region degenerates to straight-line code: keep only the
     fallthru edges and recompute dominators accordingly.  */
  FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always ();
  remove_edge (BRANCH_EDGE (entry_bb));
  FALLTHRU_EDGE (cont_bb)->probability = profile_probability::always ();
  remove_edge (BRANCH_EDGE (cont_bb));
  set_immediate_dominator (CDI_DOMINATORS, exit_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, region->entry,
			   recompute_dominator (CDI_DOMINATORS,
						region->entry));
}

/* Taskloop construct is represented after gimplification with
   two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched
   in between them.  This routine expands the inner GIMPLE_OMP_FOR.
   GOMP_taskloop{,_ull} function arranges for each task to be given just
   a single range of iterations.
*/

static void
expand_omp_taskloop_for_inner (struct omp_region *region,
			       struct omp_for_data *fd,
			       gimple *inner_stmt)
{
  tree e, t, type, itype, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, exit_bb, body_bb, cont_bb, collapse_bb = NULL;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  edge ep;
  bool broken_loop = region->cont == NULL;
  tree *counts = NULL;
  tree n1, n2, step;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = signed_type_for (type);

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      /* Bias unless both endpoints are compile-time constants with the
	 same sign.  */
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  fin_bb = BRANCH_EDGE (entry_bb)->dest;
  gcc_assert (broken_loop
	      || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
  body_bb = FALLTHRU_EDGE (entry_bb)->dest;
  if (!broken_loop)
    {
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_nondebug_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  if (fd->collapse > 1)
    {
      int first_zero_iter = -1, dummy = -1;
      basic_block l2_dom_bb = NULL, dummy_bb = NULL;

      counts = XALLOCAVEC (tree, fd->collapse);
      expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
				  fin_bb, first_zero_iter,
				  dummy_bb, dummy, l2_dom_bb);
      t = NULL_TREE;
    }
  else
    t = integer_one_node;

  step = fd->loop.step;
  /* GOMP_taskloop passed each task its iteration range through the first
     two _looptemp_ clauses of this GIMPLE_OMP_FOR.  */
  tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				 OMP_CLAUSE__LOOPTEMP_);
  gcc_assert (innerc);
  n1 = OMP_CLAUSE_DECL (innerc);
  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
  gcc_assert (innerc);
  n2 = OMP_CLAUSE_DECL (innerc);
  if (bias)
    {
      /* Undo the LLONG_MIN bias applied by the outer expansion.  */
      n1 = fold_build2 (PLUS_EXPR, fd->iter_type, n1, bias);
      n2 = fold_build2 (PLUS_EXPR, fd->iter_type, n2, bias);
    }
  n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
				 true, NULL_TREE, true, GSI_SAME_STMT);
  n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
				 true, NULL_TREE, true, GSI_SAME_STMT);
  step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
				   true, NULL_TREE, true, GSI_SAME_STMT);

  tree startvar = fd->loop.v;
  tree endvar = NULL_TREE;

  if (gimple_omp_for_combined_p (fd->for_stmt))
    {
      /* When combined into an inner worksharing construct, assign into
	 that construct's _looptemp_ variables instead of fd->loop.v.  */
      tree clauses = gimple_omp_for_clauses (inner_stmt);
      tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      startvar = OMP_CLAUSE_DECL (innerc);
      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      endvar = OMP_CLAUSE_DECL (innerc);
    }
  t = fold_convert (TREE_TYPE (startvar), n1);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (startvar)
				&& TREE_ADDRESSABLE (startvar),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  gimple *assign_stmt = gimple_build_assign (startvar, t);
  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (TREE_TYPE (startvar), n2);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  if (endvar)
    {
      assign_stmt = gimple_build_assign (endvar, e);
      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
      if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
	assign_stmt = gimple_build_assign (fd->loop.v, e);
      else
	assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
    }

  tree *nonrect_bounds = NULL;
  if (fd->collapse > 1)
    {
      if (fd->non_rect)
	{
	  nonrect_bounds = XALLOCAVEC (tree, fd->last_nonrect + 1);
	  memset (nonrect_bounds, 0, sizeof (tree) * (fd->last_nonrect + 1));
	}
      gcc_assert (gsi_bb (gsi) == entry_bb);
      expand_omp_for_init_vars (fd, &gsi, counts, nonrect_bounds, inner_stmt,
				startvar);
      entry_bb = gsi_bb (gsi);
    }

  if (!broken_loop)
    {
      /* The code controlling the sequential loop replaces the
	 GIMPLE_OMP_CONTINUE.  */
      gsi = gsi_last_nondebug_bb (cont_bb);
      gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
      gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont_stmt);
      vback = gimple_omp_continue_control_def (cont_stmt);

      if (!gimple_omp_for_combined_p (fd->for_stmt))
	{
	  if (POINTER_TYPE_P (type))
	    t = fold_build_pointer_plus (vmain, step);
	  else
	    t = fold_build2 (PLUS_EXPR, type, vmain, step);
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (vback)
					&& TREE_ADDRESSABLE (vback),
					NULL_TREE, true, GSI_SAME_STMT);
	  assign_stmt = gimple_build_assign (vback, t);
	  gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);

	  t = build2 (fd->loop.cond_code, boolean_type_node,
		      DECL_P (vback) && TREE_ADDRESSABLE (vback)
		      ? t : vback, e);
	  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
	}

      /* Remove the GIMPLE_OMP_CONTINUE statement.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
	collapse_bb = extract_omp_for_update_vars (fd, nonrect_bounds,
						   cont_bb, body_bb);
    }

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi = gsi_for_stmt (fd->for_stmt);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_RETURN statement.  */
  gsi = gsi_last_nondebug_bb (exit_bb);
  gsi_remove (&gsi, true);

  FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always ();
  if (!broken_loop)
    remove_edge (BRANCH_EDGE (entry_bb));
  else
    {
      remove_edge_and_dominated_blocks (BRANCH_EDGE (entry_bb));
      region->outer->cont = NULL;
    }

  /* Connect all the blocks.  */
  if (!broken_loop)
    {
      ep = find_edge (cont_bb, body_bb);
      if (gimple_omp_for_combined_p (fd->for_stmt))
	{
	  remove_edge (ep);
	  ep = NULL;
	}
      else if (fd->collapse > 1)
	{
	  remove_edge (ep);
	  ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	ep->flags = EDGE_TRUE_VALUE;
      find_edge (cont_bb, fin_bb)->flags
	= ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
    }

  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  if (!broken_loop)
    set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			     recompute_dominator (CDI_DOMINATORS, fin_bb));

  if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
    {
      class loop *loop = alloc_loop ();
      loop->header = body_bb;
      if (collapse_bb == NULL)
	loop->latch = cont_bb;
      add_loop (loop, body_bb->loop_father);
    }
}

/* A subroutine of expand_omp_for.  Generate code for an OpenACC
   partitioned loop.  The lowering here is abstracted, in that the
   loop parameters are passed through internal functions, which are
   further lowered by oacc_device_lower, once we get to the target
   compiler.  The loop is of the form:

   for (V = B; V LTGT E; V += S) {BODY}

   where LTGT is < or >.  We may have a specified chunking size, CHUNKING
   (constant 0 for no chunking) and we will have a GWV partitioning
   mask, specifying dimensions over which the loop is to be
   partitioned (see note below).  We generate code that looks like
   (this ignores tiling):

   <entry_bb> [incoming FALL->body, BRANCH->exit]
     typedef signedintify (typeof (V)) T;  // underlying signed integral type
     T range = E - B;
     T chunk_no = 0;
     T DIR = LTGT == '<' ?
+1 : -1;
     T chunk_max = GOACC_LOOP_CHUNK (dir, range, S, CHUNK_SIZE, GWV);
     T step = GOACC_LOOP_STEP (dir, range, S, CHUNK_SIZE, GWV);

   <head_bb> [created by splitting end of entry_bb]
     T offset = GOACC_LOOP_OFFSET (dir, range, S, CHUNK_SIZE, GWV, chunk_no);
     T bound = GOACC_LOOP_BOUND (dir, range, S, CHUNK_SIZE, GWV, offset);
     if (!(offset LTGT bound)) goto bottom_bb;

   <body_bb> [incoming]
     V = B + offset;
     {BODY}

   <cont_bb> [incoming, may == body_bb FALL->exit_bb, BRANCH->body_bb]
     offset += step;
     if (offset LTGT bound) goto body_bb; [*]

   <bottom_bb> [created by splitting start of exit_bb] insert BRANCH->head_bb
     chunk_no++;
     if (chunk < chunk_max) goto head_bb;

   <exit_bb> [incoming]
     V = B + ((range -/+ 1) / S +/- 1) * S [*]

   [*] Needed if V live at end of loop.  */

static void
expand_oacc_for (struct omp_region *region, struct omp_for_data *fd)
{
  tree v = fd->loop.v;
  enum tree_code cond_code = fd->loop.cond_code;
  enum tree_code plus_code = PLUS_EXPR;

  tree chunk_size = integer_minus_one_node;
  tree gwv = integer_zero_node;
  tree iter_type = TREE_TYPE (v);
  tree diff_type = iter_type;
  tree plus_type = iter_type;
  struct oacc_collapse *counts = NULL;

  gcc_checking_assert (gimple_omp_for_kind (fd->for_stmt)
		       == GF_OMP_FOR_KIND_OACC_LOOP);
  gcc_assert (!gimple_omp_for_combined_into_p (fd->for_stmt));
  gcc_assert (cond_code == LT_EXPR || cond_code == GT_EXPR);

  if (POINTER_TYPE_P (iter_type))
    {
      plus_code = POINTER_PLUS_EXPR;
      plus_type = sizetype;
    }
  /* All offset/range arithmetic is done in a signed type wide enough
     for the iteration variable.  */
  if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
    diff_type = signed_type_for (diff_type);
  if (TYPE_PRECISION (diff_type) < TYPE_PRECISION (integer_type_node))
    diff_type = integer_type_node;

  basic_block entry_bb = region->entry; /* BB ending in OMP_FOR */
  basic_block exit_bb = region->exit; /* BB ending in OMP_RETURN */
  basic_block cont_bb = region->cont; /* BB ending in OMP_CONTINUE  */
  basic_block bottom_bb = NULL;

  /* entry_bb has two successors; the branch edge is to the exit
     block, fallthrough edge to body.  */
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2
	      && BRANCH_EDGE (entry_bb)->dest == exit_bb);

  /* If cont_bb non-NULL, it has 2 successors.  The branch successor is
     body_bb, or to a block whose only successor is the body_bb.  Its
     fallthrough successor is the final block (same as the branch
     successor of the entry_bb).  */
  if (cont_bb)
    {
      basic_block body_bb = FALLTHRU_EDGE (entry_bb)->dest;
      basic_block bed = BRANCH_EDGE (cont_bb)->dest;

      gcc_assert (FALLTHRU_EDGE (cont_bb)->dest == exit_bb);
      gcc_assert (bed == body_bb || single_succ_edge (bed)->dest == body_bb);
    }
  else
    gcc_assert (!gimple_in_ssa_p (cfun));

  /* The exit block only has entry_bb and cont_bb as predecessors.  */
  gcc_assert (EDGE_COUNT (exit_bb->preds) == 1 + (cont_bb != NULL));

  tree chunk_no;
  tree chunk_max = NULL_TREE;
  tree bound, offset;
  tree step = create_tmp_var (diff_type, ".step");
  bool up = cond_code == LT_EXPR;
  tree dir = build_int_cst (diff_type, up ? +1 : -1);
  bool chunking = !gimple_in_ssa_p (cfun);
  bool negating;

  /* Tiling vars.  */
  tree tile_size = NULL_TREE;
  tree element_s = NULL_TREE;
  tree e_bound = NULL_TREE, e_offset = NULL_TREE, e_step = NULL_TREE;
  basic_block elem_body_bb = NULL;
  basic_block elem_cont_bb = NULL;

  /* SSA instances.  */
  tree offset_incr = NULL_TREE;
  tree offset_init = NULL_TREE;

  gimple_stmt_iterator gsi;
  gassign *ass;
  gcall *call;
  gimple *stmt;
  tree expr;
  location_t loc;
  edge split, be, fte;

  /* Split the end of entry_bb to create head_bb.  */
  split = split_block (entry_bb, last_stmt (entry_bb));
  basic_block head_bb = split->dest;
  entry_bb = split->src;

  /* Chunk setup goes at end of entry_bb, replacing the omp_for.  */
  gsi = gsi_last_nondebug_bb (entry_bb);
  gomp_for *for_stmt = as_a <gomp_for *> (gsi_stmt (gsi));
  loc = gimple_location (for_stmt);

  if (gimple_in_ssa_p (cfun))
    {
      offset_init = gimple_omp_for_index (for_stmt, 0);
      gcc_assert (integer_zerop (fd->loop.n1));
      /* The SSA parallelizer does gang parallelism.  */
      gwv = build_int_cst (integer_type_node, GOMP_DIM_MASK (GOMP_DIM_GANG));
    }

  if (fd->collapse > 1 || fd->tiling)
    {
      gcc_assert (!gimple_in_ssa_p (cfun) && up);
      counts = XALLOCAVEC (struct oacc_collapse, fd->collapse);
      tree total = expand_oacc_collapse_init (fd, &gsi, counts,
					      TREE_TYPE (fd->loop.n2), loc);

      if (SSA_VAR_P (fd->loop.n2))
	{
	  total = force_gimple_operand_gsi (&gsi, total, false, NULL_TREE,
					    true, GSI_SAME_STMT);
	  ass = gimple_build_assign (fd->loop.n2, total);
	  gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
	}
    }

  tree b = fd->loop.n1;
  tree e = fd->loop.n2;
  tree s = fd->loop.step;

  b = force_gimple_operand_gsi (&gsi, b, true, NULL_TREE, true, GSI_SAME_STMT);
  e = force_gimple_operand_gsi (&gsi, e, true, NULL_TREE, true, GSI_SAME_STMT);

  /* Convert the step, avoiding possible unsigned->signed overflow.  */
  negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
  if (negating)
    s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
  s = fold_convert (diff_type, s);
  if (negating)
    s = fold_build1 (NEGATE_EXPR, diff_type, s);
  s = force_gimple_operand_gsi (&gsi, s, true, NULL_TREE, true, GSI_SAME_STMT);

  if (!chunking)
    chunk_size = integer_zero_node;
  expr = fold_convert (diff_type, chunk_size);
  chunk_size = force_gimple_operand_gsi (&gsi, expr, true,
					 NULL_TREE, true, GSI_SAME_STMT);

  if (fd->tiling)
    {
      /* Determine the tile size and element step,
	 modify the outer loop step size.  */
      tile_size = create_tmp_var (diff_type, ".tile_size");
      expr = build_int_cst (diff_type, 1);
      for (int ix = 0; ix < fd->collapse; ix++)
	expr = fold_build2 (MULT_EXPR, diff_type, counts[ix].tile, expr);
      expr = force_gimple_operand_gsi (&gsi, expr, true,
				       NULL_TREE, true, GSI_SAME_STMT);
      ass = gimple_build_assign (tile_size, expr);
      gsi_insert_before (&gsi, ass, GSI_SAME_STMT);

      element_s = create_tmp_var (diff_type, ".element_s");
      ass = gimple_build_assign (element_s, s);
      gsi_insert_before (&gsi, ass, GSI_SAME_STMT);

      expr = fold_build2 (MULT_EXPR, diff_type, s, tile_size);
      s = force_gimple_operand_gsi (&gsi, expr, true,
				    NULL_TREE, true, GSI_SAME_STMT);
    }

  /* Determine the range, avoiding possible unsigned->signed overflow.  */
  negating = !up && TYPE_UNSIGNED (iter_type);
  expr = fold_build2 (MINUS_EXPR, plus_type,
		      fold_convert (plus_type, negating ? b : e),
		      fold_convert (plus_type, negating ? e : b));
  expr = fold_convert (diff_type, expr);
  if (negating)
    expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
  tree range = force_gimple_operand_gsi (&gsi, expr, true,
					 NULL_TREE, true, GSI_SAME_STMT);

  chunk_no = build_int_cst (diff_type, 0);
  if (chunking)
    {
      gcc_assert (!gimple_in_ssa_p (cfun));

      expr = chunk_no;
      chunk_max = create_tmp_var (diff_type, ".chunk_max");
      chunk_no = create_tmp_var (diff_type, ".chunk_no");

      ass = gimple_build_assign (chunk_no, expr);
      gsi_insert_before (&gsi, ass, GSI_SAME_STMT);

      call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
					 build_int_cst (integer_type_node,
							IFN_GOACC_LOOP_CHUNKS),
					 dir, range, s, chunk_size, gwv);
      gimple_call_set_lhs (call, chunk_max);
      gimple_set_location (call, loc);
      gsi_insert_before (&gsi, call, GSI_SAME_STMT);
    }
  else
    chunk_size = chunk_no;

  call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
				     build_int_cst (integer_type_node,
						    IFN_GOACC_LOOP_STEP),
				     dir, range, s, chunk_size, gwv);
  gimple_call_set_lhs (call, step);
  gimple_set_location (call, loc);
  gsi_insert_before (&gsi, call, GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR.  */
  gsi_remove (&gsi, true);

  /* Fixup edges from head_bb.  */
  be = BRANCH_EDGE (head_bb);
  fte = FALLTHRU_EDGE (head_bb);
  be->flags |= EDGE_FALSE_VALUE;
  fte->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE;

  basic_block body_bb = fte->dest;

  if (gimple_in_ssa_p (cfun))
    {
      gsi = gsi_last_nondebug_bb (cont_bb);
      gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));

      offset = gimple_omp_continue_control_use (cont_stmt);
      offset_incr = gimple_omp_continue_control_def (cont_stmt);
    }
  else
    {
      offset = create_tmp_var (diff_type, ".offset");
      offset_init = offset_incr = offset;
    }
  bound = create_tmp_var (TREE_TYPE (offset), ".bound");

  /* Loop offset & bound go into head_bb.  */
  gsi = gsi_start_bb (head_bb);

  call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
				     build_int_cst (integer_type_node,
						    IFN_GOACC_LOOP_OFFSET),
				     dir, range, s,
				     chunk_size, gwv, chunk_no);
  gimple_call_set_lhs (call, offset_init);
  gimple_set_location (call, loc);
  gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);

  call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
				     build_int_cst (integer_type_node,
						    IFN_GOACC_LOOP_BOUND),
				     dir, range, s,
				     chunk_size, gwv, offset_init);
  gimple_call_set_lhs (call, bound);
  gimple_set_location (call, loc);
  gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);

  expr = build2 (cond_code, boolean_type_node, offset_init, bound);
  gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
		    GSI_CONTINUE_LINKING);

  /* V assignment goes into body_bb.  */
  if (!gimple_in_ssa_p (cfun))
    {
      gsi = gsi_start_bb (body_bb);

      expr = build2 (plus_code, iter_type, b,
		     fold_convert (plus_type, offset));
      expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
				       true, GSI_SAME_STMT);
      ass = gimple_build_assign (v, expr);
      gsi_insert_before (&gsi, ass, GSI_SAME_STMT);

      if (fd->collapse > 1 || fd->tiling)
	expand_oacc_collapse_vars (fd, false, &gsi, counts, v);

      if (fd->tiling)
	{
	  /* Determine the range of the element loop -- usually simply
	     the tile_size, but could be smaller if the final
	     iteration of the outer loop is a partial tile.  */
	  tree e_range = create_tmp_var (diff_type, ".e_range");

	  expr = build2 (MIN_EXPR, diff_type,
			 build2 (MINUS_EXPR, diff_type, bound, offset),
			 build2 (MULT_EXPR, diff_type, tile_size,
				 element_s));
	  expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
					   true, GSI_SAME_STMT);
	  ass = gimple_build_assign (e_range, expr);
	  gsi_insert_before (&gsi, ass, GSI_SAME_STMT);

	  /* Determine bound, offset & step of inner loop.  */
	  e_bound = create_tmp_var (diff_type, ".e_bound");
	  e_offset = create_tmp_var (diff_type, ".e_offset");
	  e_step = create_tmp_var (diff_type, ".e_step");

	  /* Mark these as element loops.  */
	  tree t, e_gwv = integer_minus_one_node;
	  tree chunk = build_int_cst (diff_type, 0); /* Never chunked.  */

	  t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_OFFSET);
	  call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, t, dir, e_range,
					     element_s, chunk, e_gwv, chunk);
	  gimple_call_set_lhs (call, e_offset);
	  gimple_set_location (call, loc);
	  gsi_insert_before (&gsi, call, GSI_SAME_STMT);

	  t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_BOUND);
	  call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, t, dir, e_range,
					     element_s, chunk, e_gwv, e_offset);
	  gimple_call_set_lhs (call, e_bound);
	  gimple_set_location (call, loc);
	  gsi_insert_before (&gsi, call, GSI_SAME_STMT);

	  t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_STEP);
	  call = gimple_build_call_internal (IFN_GOACC_LOOP, 6, t, dir, e_range,
					     element_s, chunk, e_gwv);
	  gimple_call_set_lhs (call, e_step);
	  gimple_set_location (call, loc);
	  gsi_insert_before (&gsi, call, GSI_SAME_STMT);

	  /* Add test and split block.  */
	  expr = build2 (cond_code, boolean_type_node, e_offset, e_bound);
	  stmt = gimple_build_cond_empty (expr);
	  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	  split = split_block (body_bb, stmt);
	  elem_body_bb = split->dest;
	  if (cont_bb == body_bb)
	    cont_bb = elem_body_bb;
	  body_bb = split->src;

	  split->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE;

	  /* Add a dummy exit for the tiled block when cont_bb is missing.  */
	  if (cont_bb == NULL)
	    {
	      edge e = make_edge (body_bb, exit_bb, EDGE_FALSE_VALUE);
	      e->probability = profile_probability::even ();
	      split->probability = profile_probability::even ();
	    }

	  /* Initialize the user's loop vars.  */
	  gsi = gsi_start_bb (elem_body_bb);
	  expand_oacc_collapse_vars (fd, true, &gsi, counts, e_offset);
	}
    }

  /* Loop increment goes into cont_bb.  If this is not a loop, we
     will have spawned threads as if it was, and each one will
     execute one iteration.  The specification is not explicit about
     whether such constructs are ill-formed or not, and they can
     occur, especially when noreturn routines are involved.  */
  if (cont_bb)
    {
      gsi = gsi_last_nondebug_bb (cont_bb);
      gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
      loc = gimple_location (cont_stmt);

      if (fd->tiling)
	{
	  /* Insert element loop increment and test.  */
	  expr = build2 (PLUS_EXPR, diff_type, e_offset, e_step);
	  expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
					   true, GSI_SAME_STMT);
	  ass = gimple_build_assign (e_offset, expr);
	  gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
	  expr = build2 (cond_code, boolean_type_node, e_offset, e_bound);

	  stmt = gimple_build_cond_empty (expr);
	  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	  split = split_block (cont_bb, stmt);
	  elem_cont_bb = split->src;
	  cont_bb = split->dest;

	  split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
	  split->probability = profile_probability::unlikely ().guessed ();
	  edge latch_edge
	    = make_edge (elem_cont_bb, elem_body_bb, EDGE_TRUE_VALUE);
	  latch_edge->probability = profile_probability::likely ().guessed ();

	  edge skip_edge = make_edge (body_bb, cont_bb, EDGE_FALSE_VALUE);
	  skip_edge->probability = profile_probability::unlikely ().guessed ();
	  edge loop_entry_edge = EDGE_SUCC (body_bb, 1 - skip_edge->dest_idx);
	  loop_entry_edge->probability
	    = profile_probability::likely ().guessed ();

	  gsi = gsi_for_stmt (cont_stmt);
	}

      /* Increment offset.  */
      if (gimple_in_ssa_p (cfun))
	expr = build2 (plus_code, iter_type, offset,
		       fold_convert (plus_type, step));
      else
	expr = build2 (PLUS_EXPR, diff_type, offset, step);
      expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
				       true, GSI_SAME_STMT);
      ass = gimple_build_assign (offset_incr, expr);
      gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
      expr = build2 (cond_code, boolean_type_node, offset_incr, bound);
      gsi_insert_before (&gsi, gimple_build_cond_empty (expr), GSI_SAME_STMT);

      /*  Remove the GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      /* Fixup edges from cont_bb.  */
      be = BRANCH_EDGE (cont_bb);
      fte = FALLTHRU_EDGE (cont_bb);
      be->flags |= EDGE_TRUE_VALUE;
      fte->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;

      if (chunking)
	{
	  /* Split the beginning of exit_bb to make bottom_bb.  We
	     need to insert a nop at the start, because splitting is
	     after a stmt, not before.  */
	  gsi = gsi_start_bb (exit_bb);
	  stmt = gimple_build_nop ();
	  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	  split = split_block (exit_bb, stmt);
	  bottom_bb = split->src;
	  exit_bb = split->dest;
	  gsi = gsi_last_bb (bottom_bb);

	  /* Chunk increment and test goes into bottom_bb.  */
	  expr = build2 (PLUS_EXPR, diff_type, chunk_no,
			 build_int_cst (diff_type, 1));
	  ass = gimple_build_assign (chunk_no, expr);
	  gsi_insert_after (&gsi, ass, GSI_CONTINUE_LINKING);

	  /* Chunk test at end of bottom_bb.  */
	  expr = build2 (LT_EXPR, boolean_type_node, chunk_no, chunk_max);
	  gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
			    GSI_CONTINUE_LINKING);

	  /* Fixup edges from bottom_bb.  */
	  split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
	  split->probability = profile_probability::unlikely ().guessed ();
	  edge latch_edge = make_edge (bottom_bb, head_bb, EDGE_TRUE_VALUE);
	  latch_edge->probability = profile_probability::likely ().guessed ();
	}
    }

  gsi = gsi_last_nondebug_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  loc = gimple_location (gsi_stmt (gsi));

  if (!gimple_in_ssa_p (cfun))
    {
      /* Insert the final value of V, in case it is live.  This is the
	 value for the only thread that survives past the join.  */
      expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
      expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
      expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
      expr = fold_build2 (MULT_EXPR, diff_type, expr, s);
      expr = build2 (plus_code, iter_type, b, fold_convert (plus_type, expr));
      expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
				       true, GSI_SAME_STMT);
      ass = gimple_build_assign (v, expr);
      gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
    }

  /* Remove the OMP_RETURN.  */
  gsi_remove (&gsi, true);

  if (cont_bb)
    {
      /* We now have one, two or three nested loops.  Update the loop
	 structures.  */
      class loop *parent = entry_bb->loop_father;
      class loop *body = body_bb->loop_father;

      if (chunking)
	{
	  class loop *chunk_loop = alloc_loop ();
	  chunk_loop->header = head_bb;
	  chunk_loop->latch = bottom_bb;
	  add_loop (chunk_loop, parent);
	  parent = chunk_loop;
	}
      else if (parent != body)
	{
	  gcc_assert (body->header == body_bb);
	  gcc_assert (body->latch == cont_bb
		      || single_pred (body->latch) == cont_bb);
	  parent = NULL;
	}

      if (parent)
	{
	  class loop *body_loop = alloc_loop ();
	  body_loop->header = body_bb;
	  body_loop->latch = cont_bb;
	  add_loop (body_loop, parent);

	  if (fd->tiling)
	    {
	      /* Insert tiling's element loop.  */
	      class loop *inner_loop = alloc_loop ();
	      inner_loop->header = elem_body_bb;
	      inner_loop->latch = elem_cont_bb;
	      add_loop (inner_loop, body_loop);
	    }
	}
    }
}

/* Expand the OMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region, gimple *inner_stmt)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops = XALLOCAVEC (struct omp_for_data_loop,
		      gimple_omp_for_collapse (last_stmt (region->entry)));
  omp_extract_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
			&fd, loops);
  region->sched_kind = fd.sched_kind;
  region->sched_modifiers = fd.sched_modifiers;
  region->has_lastprivate_conditional = fd.lastprivate_conditional != 0;
  if (fd.non_rect && !gimple_omp_for_combined_into_p (fd.for_stmt))
    {
      /* For non-rectangular loops with constant multipliers and steps,
	 diagnose outer steps that are not a multiple of the adjusted
	 inner step, which the OpenMP spec makes invalid.  */
      for (int i = fd.first_nonrect; i <= fd.last_nonrect; i++)
	if ((loops[i].m1 || loops[i].m2)
	    && (loops[i].m1 == NULL_TREE
		|| TREE_CODE (loops[i].m1) == INTEGER_CST)
	    && (loops[i].m2 == NULL_TREE
		|| TREE_CODE (loops[i].m2) == INTEGER_CST)
	    && TREE_CODE (loops[i].step) == INTEGER_CST
	    && TREE_CODE (loops[i - loops[i].outer].step) == INTEGER_CST)
	  {
	    tree t;
	    tree itype = TREE_TYPE (loops[i].v);
	    if (loops[i].m1 && loops[i].m2)
	      t = fold_build2 (MINUS_EXPR, itype, loops[i].m2, loops[i].m1);
	    else if (loops[i].m1)
	      t = fold_build1 (NEGATE_EXPR, itype, loops[i].m1);
	    else
	      t = loops[i].m2;
	    t = fold_build2 (MULT_EXPR, itype, t,
			     fold_convert (itype,
					   loops[i - loops[i].outer].step));
	    if (TYPE_UNSIGNED (itype) && loops[i].cond_code == GT_EXPR)
	      t = fold_build2 (TRUNC_MOD_EXPR, itype,
			       fold_build1 (NEGATE_EXPR, itype, t),
			       fold_build1 (NEGATE_EXPR, itype,
					    fold_convert (itype,
							  loops[i].step)));
	    else
	      t = fold_build2 (TRUNC_MOD_EXPR, itype, t,
			       fold_convert (itype, loops[i].step));
	    if (integer_nonzerop (t))
	      error_at (gimple_location (fd.for_stmt),
			"invalid OpenMP non-rectangular loop step; "
			"%<(%E - %E) * %E%> is not a multiple of loop %d "
			"step %qE",
			loops[i].m2 ? loops[i].m2 : integer_zero_node,
			loops[i].m1 ? loops[i].m1 : integer_zero_node,
			loops[i - loops[i].outer].step, i + 1,
			loops[i].step);
	  }
    }

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }
  else
    /* If there isn't a continue then this is a degerate case where
       the introduction of abnormal edges during lowering will prevent
       original loops from being detected.  Fix that up.  */
    loops_state_set (LOOPS_NEED_FIXUP);

  /* Dispatch to the appropriate expander based on loop kind and
     schedule.  */
  if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD)
    expand_omp_simd (region, &fd);
  else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
    {
      gcc_assert (!inner_stmt && !fd.non_rect);
      expand_oacc_for (region, &fd);
    }
  else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_TASKLOOP)
    {
      if (gimple_omp_for_combined_into_p (fd.for_stmt))
	expand_omp_taskloop_for_inner (region, &fd, inner_stmt);
      else
	expand_omp_taskloop_for_outer (region, &fd, inner_stmt);
    }
  else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	   && !fd.have_ordered)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd, inner_stmt);
      else
	expand_omp_for_static_chunk (region, &fd, inner_stmt);
    }
  else
    {
      /* Runtime/dynamic/guided schedules: compute the index of the
	 GOMP_loop_* start/next builtins to call, plus the sched
	 argument encoding for the combined GOMP_loop_start entry
	 points.  */
      int fn_index, start_ix, next_ix;
      unsigned HOST_WIDE_INT sched = 0;
      tree sched_arg = NULL_TREE;

      gcc_assert (gimple_omp_for_kind (fd.for_stmt)
		  == GF_OMP_FOR_KIND_FOR && !fd.non_rect);
      if (fd.chunk_size == NULL
	  && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
	fd.chunk_size = integer_zero_node;
      switch (fd.sched_kind)
	{
	case OMP_CLAUSE_SCHEDULE_RUNTIME:
	  if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) != 0
	      && fd.lastprivate_conditional == 0)
	    {
	      gcc_assert (!fd.have_ordered);
	      fn_index = 6;
	      sched = 4;
	    }
	  else if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0
		   && !fd.have_ordered
		   && fd.lastprivate_conditional == 0)
	    fn_index = 7;
	  else
	    {
	      fn_index = 3;
	      sched = (HOST_WIDE_INT_1U << 31);
	    }
	  break;
	case OMP_CLAUSE_SCHEDULE_DYNAMIC:
	case OMP_CLAUSE_SCHEDULE_GUIDED:
	  if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0
	      && !fd.have_ordered
	      && fd.lastprivate_conditional == 0)
	    {
	      fn_index = 3 + fd.sched_kind;
	      sched = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_GUIDED) + 2;
	      break;
	    }
	  fn_index = fd.sched_kind;
	  sched = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_GUIDED) + 2;
	  sched += (HOST_WIDE_INT_1U << 31);
	  break;
	case OMP_CLAUSE_SCHEDULE_STATIC:
	  gcc_assert (fd.have_ordered);
	  fn_index = 0;
	  sched = (HOST_WIDE_INT_1U << 31) + 1;
	  break;
	default:
	  gcc_unreachable ();
	}
      if (!fd.ordered)
	fn_index += fd.have_ordered * 8;
      if (fd.ordered)
	start_ix = ((int)BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START) + fn_index;
      else
	start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
      next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
      if (fd.have_reductemp || fd.have_pointer_condtemp)
	{
	  if (fd.ordered)
	    start_ix = (int)BUILT_IN_GOMP_LOOP_DOACROSS_START;
	  else if (fd.have_ordered)
	    start_ix = (int)BUILT_IN_GOMP_LOOP_ORDERED_START;
	  else
	    start_ix = (int)BUILT_IN_GOMP_LOOP_START;
	  sched_arg = build_int_cstu (long_integer_type_node, sched);
	  if (!fd.chunk_size)
	    fd.chunk_size = integer_zero_node;
	}
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		       - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
	  next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
	}
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix, sched_arg,
			      inner_stmt);
    }

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}

/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.
*/

static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gomp_sections *sections_stmt;
  gimple *stmt;
  gomp_continue *cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (region->exit != NULL);
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
    l2 = gimple_block_label (l2_bb);
  else
    {
      /* This can happen if there are reductions.  */
      len = EDGE_COUNT (l0_bb->succs);
      gcc_assert (len > 0);
      e = EDGE_SUCC (l0_bb, len - 1);
      si = gsi_last_nondebug_bb (e->dest);
      l2 = NULL_TREE;
      if (gsi_end_p (si)
	  || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	l2 = gimple_block_label (e->dest);
      else
	/* Pick the first successor whose block does not end in a
	   GIMPLE_OMP_SECTION — that one is the exit target.  */
	FOR_EACH_EDGE (e, ei, l0_bb->succs)
	  {
	    si = gsi_last_nondebug_bb (e->dest);
	    if (gsi_end_p (si)
		|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	      {
		l2 = gimple_block_label (e->dest);
		break;
	      }
	  }
    }
  if (exit_reachable)
    default_bb = create_empty_bb (l1_bb->prev_bb);
  else
    default_bb = create_empty_bb (l0_bb);

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use vec::quick_push on label_vec throughout, since we know the size
     in advance.  */
  auto_vec<tree> label_vec (len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_nondebug_bb (entry_bb);
  sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  tree clauses = gimple_omp_sections_clauses (sections_stmt);
  tree reductmp = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
  tree condtmp = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_);
  tree cond_var = NULL_TREE;
  if (reductmp || condtmp)
    {
      /* Task reductions and/or conditional lastprivate temporaries need
	 the 3-argument GOMP_sections2_start entry point.  */
      tree reductions = null_pointer_node, mem = null_pointer_node;
      tree memv = NULL_TREE, condtemp = NULL_TREE;
      gimple_stmt_iterator gsi = gsi_none ();
      gimple *g = NULL;
      if (reductmp)
	{
	  reductions = OMP_CLAUSE_DECL (reductmp);
	  gcc_assert (TREE_CODE (reductions) == SSA_NAME);
	  g = SSA_NAME_DEF_STMT (reductions);
	  reductions = gimple_assign_rhs1 (g);
	  OMP_CLAUSE_DECL (reductmp) = reductions;
	  gsi = gsi_for_stmt (g);
	}
      else
	gsi = si;
      if (condtmp)
	{
	  condtemp = OMP_CLAUSE_DECL (condtmp);
	  tree c = omp_find_clause (OMP_CLAUSE_CHAIN (condtmp),
				    OMP_CLAUSE__CONDTEMP_);
	  cond_var = OMP_CLAUSE_DECL (c);
	  tree type = TREE_TYPE (condtemp);
	  memv = create_tmp_var (type);
	  TREE_ADDRESSABLE (memv) = 1;
	  /* Size the buffer by the number of conditional lastprivate
	     clauses.  */
	  unsigned cnt = 0;
	  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
		&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
	      ++cnt;
	  unsigned HOST_WIDE_INT sz
	    = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))) * cnt;
	  expand_omp_build_assign (&gsi, memv, build_int_cst (type, sz),
				   false);
	  mem = build_fold_addr_expr (memv);
	}
      t = build_int_cst (unsigned_type_node, len - 1);
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS2_START);
      stmt = gimple_build_call (u, 3, t, reductions, mem);
      gimple_call_set_lhs (stmt, vin);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      if (condtmp)
	{
	  expand_omp_build_assign (&gsi, condtemp, memv, false);
	  tree t = build2 (PLUS_EXPR, TREE_TYPE (cond_var),
			   vin, build_one_cst (TREE_TYPE (cond_var)));
	  expand_omp_build_assign (&gsi, cond_var, t, false);
	}
      if (reductmp)
	{
	  gsi_remove (&gsi, true);
	  release_ssa_name (gimple_assign_lhs (g));
	}
    }
  else if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node, len - 1);
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (u, 0);
    }
  if (!reductmp && !condtmp)
    {
      gimple_call_set_lhs (stmt, vin);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
    }
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_nondebug_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = as_a <gomp_continue *> (last_stmt (l1_bb));
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  /* Case 0: the runtime returned no more work; jump to the exit label.  */
  t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
  label_vec.quick_push (t);
  i = 1;

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  It does not consume a case
	 number.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      label_vec.quick_push (u);

      si = gsi_last_nondebug_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      si = gsi_last_nondebug_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);
  add_bb_to_loop (default_bb, current_loops->tree_root);

  stmt = gimple_build_switch (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      tree bfn_decl;

      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_nondebug_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (bfn_decl, 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      if (cond_var)
	{
	  tree t = build2 (PLUS_EXPR, TREE_TYPE (cond_var),
			   vnext, build_one_cst (TREE_TYPE (cond_var)));
	  expand_omp_build_assign (&si, cond_var, t, false);
	}
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
    }

  /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
  si = gsi_last_nondebug_bb (l2_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (si)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
  else if (gimple_omp_return_lhs (gsi_stmt (si)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
  stmt = gimple_build_call (t, 0);
  if (gimple_omp_return_lhs (gsi_stmt (si)))
    gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}

/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_nondebug_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_nondebug_bb (exit_bb);
  /* Unless a nowait clause was present, a barrier is emitted at the
     exit of the single region.  */
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    {
      tree t = gimple_omp_return_lhs (gsi_stmt (si));
      gsi_insert_after (&si, omp_build_barrier (t), GSI_SAME_STMT);
    }
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}

/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.
*/

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_nondebug_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
  /* A host teams construct is expanded like a parallel/task region
     rather than being stripped here.  */
  if (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS
      && gimple_omp_teams_host (as_a <gomp_teams *> (gsi_stmt (si))))
    {
      expand_omp_taskreg (region);
      return;
    }
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_nondebug_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}

/* Translate enum omp_memory_order to enum memmodel.  The two enums are
   using different numbers so that OMP_MEMORY_ORDER_UNSPECIFIED is 0.  */

static enum memmodel
omp_memory_order_to_memmodel (enum omp_memory_order mo)
{
  switch (mo)
    {
    case OMP_MEMORY_ORDER_RELAXED: return MEMMODEL_RELAXED;
    case OMP_MEMORY_ORDER_ACQUIRE: return MEMMODEL_ACQUIRE;
    case OMP_MEMORY_ORDER_RELEASE: return MEMMODEL_RELEASE;
    case OMP_MEMORY_ORDER_ACQ_REL: return MEMMODEL_ACQ_REL;
    case OMP_MEMORY_ORDER_SEQ_CST: return MEMMODEL_SEQ_CST;
    default: gcc_unreachable ();
    }
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile load.
*/

static bool
expand_omp_atomic_load (basic_block load_bb, tree addr,
			tree loaded_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb;
  location_t loc;
  gimple *stmt;
  tree decl, call, type, itype;

  gsi = gsi_last_nondebug_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_load_optab[mode], and mode
     is smaller than word size, then expand_atomic_load assumes that the load
     is atomic.  We could avoid the builtin entirely in this case.  */

  /* INDEX + 1 skips the generic _N entry and selects the size-specific
     __atomic_load variant.  */
  tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (loaded_val);
  itype = TREE_TYPE (TREE_TYPE (decl));

  enum omp_memory_order omo = gimple_omp_atomic_memory_order (stmt);
  tree mo = build_int_cst (NULL, omp_memory_order_to_memmodel (omo));
  call = build_call_expr_loc (loc, decl, 2, addr, mo);
  if (!useless_type_conversion_p (type, itype))
    call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
  call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Also drop the now-redundant GIMPLE_OMP_ATOMIC_STORE marker.  */
  store_bb = single_succ (load_bb);
  gsi = gsi_last_nondebug_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile store.
*/

static bool
expand_omp_atomic_store (basic_block load_bb, tree addr,
			 tree loaded_val, tree stored_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb = single_succ (load_bb);
  location_t loc;
  gimple *stmt;
  tree decl, call, type, itype;
  machine_mode imode;
  bool exchange;

  gsi = gsi_last_nondebug_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);

  /* If the load value is needed, then this isn't a store but an exchange.  */
  exchange = gimple_omp_atomic_need_value_p (stmt);

  gsi = gsi_last_nondebug_bb (store_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_store_optab[mode], and mode
     is smaller than word size, then expand_atomic_store assumes that the store
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
  tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (stored_val);

  /* Dig out the type of the function's second argument.  */
  itype = TREE_TYPE (decl);
  itype = TYPE_ARG_TYPES (itype);
  itype = TREE_CHAIN (itype);
  itype = TREE_VALUE (itype);
  imode = TYPE_MODE (itype);

  if (exchange && !can_atomic_exchange_p (imode, true))
    return false;

  if (!useless_type_conversion_p (itype, type))
    stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
  enum omp_memory_order omo = gimple_omp_atomic_memory_order (stmt);
  tree mo = build_int_cst (NULL, omp_memory_order_to_memmodel (omo));
  call = build_call_expr_loc (loc, decl, 3, addr, stored_val, mo);
  if (exchange)
    {
      if (!useless_type_conversion_p (type, itype))
	call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
    }

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above.  */
  gsi = gsi_last_nondebug_bb (load_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb, tree addr,
			    tree loaded_val, tree stored_val, int index)
{
  enum built_in_function oldbase, newbase, tmpbase;
  tree decl, itype, call;
  tree lhs, rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple *stmt;
  location_t loc;
  enum tree_code code;
  bool need_old, need_new;
  machine_mode imode;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */

  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  if (is_gimple_debug (stmt))
    {
      gsi_next_nondebug (&gsi);
      if (gsi_end_p (gsi))
	return false;
      stmt = gsi_stmt (gsi);
    }
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next_nondebug (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;
  need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
  need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
  enum omp_memory_order omo
    = gimple_omp_atomic_memory_order (last_stmt (load_bb));
  enum memmodel mo = omp_memory_order_to_memmodel (omo);
  gcc_checking_assert (!need_old || !need_new);

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  code = gimple_assign_rhs_code (stmt);
  switch (code)
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
      newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
      break;
    case MINUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
      newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
      break;
    case BIT_AND_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
      newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
      break;
    case BIT_IOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
      newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
      break;
    case BIT_XOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
      newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  /* need_new selects the OP_FETCH (new value) flavor, otherwise the
     FETCH_OP (old value) flavor is used.  */
  tmpbase = ((enum built_in_function)
	     ((need_new ? newbase : oldbase) + index + 1));
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;
  itype = TREE_TYPE (TREE_TYPE (decl));
  imode = TYPE_MODE (itype);

  /* We could test all of the various optabs involved, but the fact of the
     matter is that (with the exception of i486 vs i586 and xadd) all targets
     that support any atomic operaton optab also implements compare-and-swap.
     Let optabs.c take care of expanding any compare-and-swap loop.  */
  if (!can_compare_and_swap_p (imode, true) || !can_atomic_load_p (imode))
    return false;

  gsi = gsi_last_nondebug_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* OpenMP does not imply any barrier-like semantics on its atomic ops.
     It only requires that the operation happen atomically.  Thus we can
     use the RELAXED memory model.  */
  call = build_call_expr_loc (loc, decl, 3, addr,
			      fold_convert_loc (loc, itype, rhs),
			      build_int_cst (NULL, mo));

  if (need_old || need_new)
    {
      lhs = need_old ? loaded_val : stored_val;
      call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
    }
  else
    call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_STORE and then the assignment that
     computed the stored value.  */
  gsi = gsi_last_nondebug_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_nondebug_bb (store_bb);
  stmt = gsi_stmt (gsi);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    {
      release_defs (stmt);
      update_ssa (TODO_update_ssa_no_phi);
    }

  return true;
}

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.
*/

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
			    tree addr, tree loaded_val, tree stored_val,
			    int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr, atype;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple *phi, *stmt;
  edge e;
  enum built_in_function fncode;

  /* ??? We need a non-pointer interface to __atomic_compare_exchange in
     order to use the RELAXED memory model effectively.  */
  fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
				    + index + 1);
  cmpxchg = builtin_decl_explicit (fncode);
  if (cmpxchg == NULL_TREE)
    return false;
  type = TYPE_MAIN_VARIANT (TREE_TYPE (loaded_val));
  atype = type;
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (!can_compare_and_swap_p (TYPE_MODE (itype), true)
      || !can_atomic_load_p (TYPE_MODE (itype)))
    return false;

  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_nondebug_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
							   true));
      atype = itype;
      iaddr_val
	= force_gimple_operand_gsi (&si,
				    fold_convert (TREE_TYPE (iaddr), addr),
				    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype);
      if (gimple_in_ssa_p (cfun))
	loadedi = make_ssa_name (loadedi);
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  /* Prefer an explicit relaxed __atomic_load for the initial value;
     fall back to a plain MEM_REF dereference otherwise.  */
  fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
  tree loaddecl = builtin_decl_explicit (fncode);
  if (loaddecl)
    initial
      = fold_convert (atype,
		      build_call_expr (loaddecl, 2, iaddr,
				       build_int_cst (NULL_TREE,
						      MEMMODEL_RELAXED)));
  else
    {
      tree off
	= build_int_cst (build_pointer_type_for_mode (atype, ptr_mode,
						      true), 0);
      initial = build2 (MEM_REF, atype, iaddr, off);
    }

  initial
    = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
				GSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    gsi_insert_before (&si,
		       gimple_build_assign (loadedi, initial),
		       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
	  gassign *stmt;
	  x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  stmt = gimple_build_assign (loaded_val, x);
	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
	}
      else
	{
	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	}
    }
  gsi_remove (&si, true);

  si = gsi_last_nondebug_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi
      = force_gimple_operand_gsi (&si,
				  build1 (VIEW_CONVERT_EXPR, itype,
					  stored_val), true, NULL_TREE, true,
				  GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
					  fold_convert (TREE_TYPE (loadedi),
							new_storedi),
					  true, NULL_TREE,
					  true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi));
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  tree ne = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali);
  stmt = gimple_build_cond_empty (ne);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;
  /* Expect no looping.  */
  e->probability = profile_probability::guessed_always ();

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
  e->probability = profile_probability::guessed_never ();

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  /* Register the retry cycle as a natural loop.  */
  class loop *loop = alloc_loop ();
  loop->header = loop_header;
  loop->latch = store_bb;
  add_loop (loop, loop_header->loop_father);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

				  GOMP_atomic_start ();
				  *addr = rhs;
				  GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.
We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   GIMPLE_OMP_ATOMIC_STORE (stored_val)  with
   *addr = stored_val;  */

static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gassign *stmt;
  tree t;

  si = gsi_last_nondebug_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  /* Build *ADDR with the type of LOADED_VAL so the dereference reads the
     right amount of memory.  */
  tree mem = build_simple_mem_ref (addr);
  TREE_TYPE (mem) = TREE_TYPE (loaded_val);
  TREE_OPERAND (mem, 1)
    = fold_convert (build_pointer_type_for_mode (TREE_TYPE (mem), ptr_mode,
						 true),
		    TREE_OPERAND (mem, 1));
  stmt = gimple_build_assign (loaded_val, mem);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_nondebug_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (unshare_expr (mem), stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}

/* Expand an GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().
*/

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
  gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (loaded_val));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  INDEX becomes the
     log2 of the byte size; 0..4 covers 1, 2, 4, 8 and 16 bytes.  */
  index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* Atomic load.  */
	  scalar_mode smode;
	  if (loaded_val == stored_val
	      && (is_int_mode (TYPE_MODE (type), &smode)
		  || is_float_mode (TYPE_MODE (type), &smode))
	      && GET_MODE_BITSIZE (smode) <= BITS_PER_WORD
	      && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
	    return;

	  /* Atomic store.  */
	  if ((is_int_mode (TYPE_MODE (type), &smode)
	       || is_float_mode (TYPE_MODE (type), &smode))
	      && GET_MODE_BITSIZE (smode) <= BITS_PER_WORD
	      && store_bb == single_succ (load_bb)
	      && first_stmt (store_bb) == store
	      && expand_omp_atomic_store (load_bb, addr, loaded_val,
					  stored_val, index))
	    return;

	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb)
	      && expand_omp_atomic_fetch_op (load_bb, addr,
					     loaded_val, stored_val, index))
	    return;

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}

/* Mark the loops inside the kernels region starting at REGION_ENTRY and
   ending at REGION_EXIT.  */

static void
mark_loops_in_oacc_kernels_region (basic_block region_entry,
				   basic_block region_exit)
{
  class loop *outer = region_entry->loop_father;
  gcc_assert (region_exit == NULL || outer == region_exit->loop_father);

  /* Don't parallelize the kernels region if it contains more than one outer
     loop.  */
  unsigned int nr_outer_loops = 0;
  class loop *single_outer = NULL;
  for (class loop *loop = outer->inner; loop != NULL; loop = loop->next)
    {
      gcc_assert (loop_outer (loop) == outer);

      /* Only count loops dominated by the region entry but not by the
	 region exit, i.e. loops actually inside the region.  */
      if (!dominated_by_p (CDI_DOMINATORS, loop->header, region_entry))
	continue;

      if (region_exit != NULL
	  && dominated_by_p (CDI_DOMINATORS, loop->header, region_exit))
	continue;

      nr_outer_loops++;
      single_outer = loop;
    }
  if (nr_outer_loops != 1)
    return;

  /* Bail out if any nesting level contains sibling loops.  */
  for (class loop *loop = single_outer->inner;
       loop != NULL;
       loop = loop->inner)
    if (loop->next)
      return;

  /* Mark the loops in the region.  */
  for (class loop *loop = single_outer; loop != NULL; loop = loop->inner)
    loop->in_oacc_kernels_region = true;
}

/* Build target argument identifier from the DEVICE identifier, value
   identifier ID and whether the element also has a SUBSEQUENT_PARAM.  */

static tree
get_target_argument_identifier_1 (int device, bool subseqent_param, int id)
{
  tree t = build_int_cst (integer_type_node, device);
  if (subseqent_param)
    t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
		     build_int_cst (integer_type_node,
				    GOMP_TARGET_ARG_SUBSEQUENT_PARAM));
  t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
		   build_int_cst (integer_type_node, id));
  return t;
}

/* Like above but return it in type that can be directly stored as an element
   of the argument array.
*/

static tree
get_target_argument_identifier (int device, bool subseqent_param, int id)
{
  tree t = get_target_argument_identifier_1 (device, subseqent_param, id);
  return fold_convert (ptr_type_node, t);
}

/* Return a target argument consisting of DEVICE identifier, value identifier
   ID, and the actual VALUE.  */

static tree
get_target_argument_value (gimple_stmt_iterator *gsi, int device, int id,
			   tree value)
{
  /* The value is shifted above the identifier bits and OR-ed in, so one
     pointer-sized element carries device, id and value together.  */
  tree t = fold_build2 (LSHIFT_EXPR, integer_type_node,
			fold_convert (integer_type_node, value),
			build_int_cst (unsigned_type_node,
				       GOMP_TARGET_ARG_VALUE_SHIFT));
  t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
		   get_target_argument_identifier_1 (device, false, id));
  t = fold_convert (ptr_type_node, t);
  return force_gimple_operand_gsi (gsi, t, true, NULL, true, GSI_SAME_STMT);
}

/* If VALUE is an integer constant greater than -2^15 and smaller than 2^15,
   push one argument to ARGS with both the DEVICE, ID and VALUE embedded in it,
   otherwise push an identifier (with DEVICE and ID) and the VALUE in two
   arguments.  */

static void
push_target_argument_according_to_value (gimple_stmt_iterator *gsi, int device,
					 int id, tree value, vec <tree> *args)
{
  if (tree_fits_shwi_p (value)
      && tree_to_shwi (value) > -(1 << 15)
      && tree_to_shwi (value) < (1 << 15))
    args->quick_push (get_target_argument_value (gsi, device, id, value));
  else
    {
      /* Two-element encoding: identifier (flagged as having a subsequent
	 parameter) followed by the value itself.  */
      args->quick_push (get_target_argument_identifier (device, true, id));
      value = fold_convert (ptr_type_node, value);
      value = force_gimple_operand_gsi (gsi, value, true, NULL, true,
					GSI_SAME_STMT);
      args->quick_push (value);
    }
}

/* Create an array of arguments that is then passed to GOMP_target.
*/

static tree
get_target_arguments (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt)
{
  auto_vec <tree, 6> args;
  tree clauses = gimple_omp_target_clauses (tgt_stmt);
  /* -1 means the clause is absent and the runtime picks the value.  */
  tree t, c = omp_find_clause (clauses, OMP_CLAUSE_NUM_TEAMS);
  if (c)
    t = OMP_CLAUSE_NUM_TEAMS_EXPR (c);
  else
    t = integer_minus_one_node;
  push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
					   GOMP_TARGET_ARG_NUM_TEAMS, t, &args);
  c = omp_find_clause (clauses, OMP_CLAUSE_THREAD_LIMIT);
  if (c)
    t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c);
  else
    t = integer_minus_one_node;
  push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
					   GOMP_TARGET_ARG_THREAD_LIMIT, t,
					   &args);

  /* Produce more, perhaps device specific, arguments here.  */

  /* Materialize the arguments into a NULL-terminated array of pointers
     and return its address.  */
  tree argarray = create_tmp_var (build_array_type_nelts (ptr_type_node,
							  args.length () + 1),
				  ".omp_target_args");
  for (unsigned i = 0; i < args.length (); i++)
    {
      tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
			 build_int_cst (integer_type_node, i),
			 NULL_TREE, NULL_TREE);
      gsi_insert_before (gsi, gimple_build_assign (ref, args[i]),
			 GSI_SAME_STMT);
    }
  tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
		     build_int_cst (integer_type_node, args.length ()),
		     NULL_TREE, NULL_TREE);
  gsi_insert_before (gsi, gimple_build_assign (ref, null_pointer_node),
		     GSI_SAME_STMT);
  TREE_ADDRESSABLE (argarray) = 1;
  return build_fold_addr_expr (argarray);
}

/* Expand the GIMPLE_OMP_TARGET starting at REGION.
*/ static void expand_omp_target (struct omp_region *region) { basic_block entry_bb, exit_bb, new_bb; struct function *child_cfun; tree child_fn, block, t; gimple_stmt_iterator gsi; gomp_target *entry_stmt; gimple *stmt; edge e; bool offloaded, data_region; int target_kind; entry_stmt = as_a <gomp_target *> (last_stmt (region->entry)); target_kind = gimple_omp_target_kind (entry_stmt); new_bb = region->entry; offloaded = is_gimple_omp_offloaded (entry_stmt); switch (target_kind) { case GF_OMP_TARGET_KIND_REGION: case GF_OMP_TARGET_KIND_UPDATE: case GF_OMP_TARGET_KIND_ENTER_DATA: case GF_OMP_TARGET_KIND_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_PARALLEL: case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_SERIAL: case GF_OMP_TARGET_KIND_OACC_UPDATE: case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_DECLARE: data_region = false; break; case GF_OMP_TARGET_KIND_DATA: case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: data_region = true; break; default: gcc_unreachable (); } child_fn = NULL_TREE; child_cfun = NULL; if (offloaded) { child_fn = gimple_omp_target_child_fn (entry_stmt); child_cfun = DECL_STRUCT_FUNCTION (child_fn); } /* Supported by expand_omp_taskreg, but not here. */ if (child_cfun != NULL) gcc_checking_assert (!child_cfun->cfg); gcc_checking_assert (!gimple_in_ssa_p (cfun)); entry_bb = region->entry; exit_bb = region->exit; switch (target_kind) { case GF_OMP_TARGET_KIND_OACC_KERNELS: mark_loops_in_oacc_kernels_region (region->entry, region->exit); /* Further down, all OpenACC compute constructs will be mapped to BUILT_IN_GOACC_PARALLEL, and to distinguish between them, there is an "oacc kernels" attribute set for OpenACC kernels. 
*/ DECL_ATTRIBUTES (child_fn) = tree_cons (get_identifier ("oacc kernels"), NULL_TREE, DECL_ATTRIBUTES (child_fn)); break; case GF_OMP_TARGET_KIND_OACC_SERIAL: /* Further down, all OpenACC compute constructs will be mapped to BUILT_IN_GOACC_PARALLEL, and to distinguish between them, there is an "oacc serial" attribute set for OpenACC serial. */ DECL_ATTRIBUTES (child_fn) = tree_cons (get_identifier ("oacc serial"), NULL_TREE, DECL_ATTRIBUTES (child_fn)); break; default: break; } if (offloaded) { unsigned srcidx, dstidx, num; /* If the offloading region needs data sent from the parent function, then the very first statement (except possible tree profile counter updates) of the offloading body is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since &.OMP_DATA_O is passed as an argument to the child function, we need to replace it with the argument as seen by the child function. In most cases, this will end up being the identity assignment .OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had a function call that has been inlined, the original PARM_DECL .OMP_DATA_I may have been converted into a different local variable. In which case, we need to keep the assignment. */ tree data_arg = gimple_omp_target_data_arg (entry_stmt); if (data_arg) { basic_block entry_succ_bb = single_succ (entry_bb); gimple_stmt_iterator gsi; tree arg; gimple *tgtcopy_stmt = NULL; tree sender = TREE_VEC_ELT (data_arg, 0); for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi)) { gcc_assert (!gsi_end_p (gsi)); stmt = gsi_stmt (gsi); if (gimple_code (stmt) != GIMPLE_ASSIGN) continue; if (gimple_num_ops (stmt) == 2) { tree arg = gimple_assign_rhs1 (stmt); /* We're ignoring the subcode because we're effectively doing a STRIP_NOPS. 
*/ if (TREE_CODE (arg) == ADDR_EXPR && TREE_OPERAND (arg, 0) == sender) { tgtcopy_stmt = stmt; break; } } } gcc_assert (tgtcopy_stmt != NULL); arg = DECL_ARGUMENTS (child_fn); gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg); gsi_remove (&gsi, true); } /* Declare local variables needed in CHILD_CFUN. */ block = DECL_INITIAL (child_fn); BLOCK_VARS (block) = vec2chain (child_cfun->local_decls); /* The gimplifier could record temporaries in the offloading block rather than in containing function's local_decls chain, which would mean cgraph missed finalizing them. Do it now. */ for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t)) if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t)) varpool_node::finalize_decl (t); DECL_SAVED_TREE (child_fn) = NULL; /* We'll create a CFG for child_fn, so no gimple body is needed. */ gimple_set_body (child_fn, NULL); TREE_USED (block) = 1; /* Reset DECL_CONTEXT on function arguments. */ for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t)) DECL_CONTEXT (t) = child_fn; /* Split ENTRY_BB at GIMPLE_*, so that it can be moved to the child function. */ gsi = gsi_last_nondebug_bb (entry_bb); stmt = gsi_stmt (gsi); gcc_assert (stmt && gimple_code (stmt) == gimple_code (entry_stmt)); e = split_block (entry_bb, stmt); gsi_remove (&gsi, true); entry_bb = e->dest; single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */ if (exit_bb) { gsi = gsi_last_nondebug_bb (exit_bb); gcc_assert (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); stmt = gimple_build_return (NULL); gsi_insert_after (&gsi, stmt, GSI_SAME_STMT); gsi_remove (&gsi, true); } /* Move the offloading region into CHILD_CFUN. 
*/ block = gimple_block (entry_stmt); new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block); if (exit_bb) single_succ_edge (new_bb)->flags = EDGE_FALLTHRU; /* When the OMP expansion process cannot guarantee an up-to-date loop tree arrange for the child function to fixup loops. */ if (loops_state_satisfies_p (LOOPS_NEED_FIXUP)) child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP; /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */ num = vec_safe_length (child_cfun->local_decls); for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++) { t = (*child_cfun->local_decls)[srcidx]; if (DECL_CONTEXT (t) == cfun->decl) continue; if (srcidx != dstidx) (*child_cfun->local_decls)[dstidx] = t; dstidx++; } if (dstidx != num) vec_safe_truncate (child_cfun->local_decls, dstidx); /* Inform the callgraph about the new function. */ child_cfun->curr_properties = cfun->curr_properties; child_cfun->has_simduid_loops |= cfun->has_simduid_loops; child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops; cgraph_node *node = cgraph_node::get_create (child_fn); node->parallelized_function = 1; cgraph_node::add_new_function (child_fn, true); /* Add the new function to the offload table. */ if (ENABLE_OFFLOADING) { if (in_lto_p) DECL_PRESERVE_P (child_fn) = 1; vec_safe_push (offload_funcs, child_fn); } bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl) && !DECL_ASSEMBLER_NAME_SET_P (child_fn); /* Fix the callgraph edges for child_cfun. Those for cfun will be fixed in a following pass. */ push_cfun (child_cfun); if (need_asm) assign_assembler_name_if_needed (child_fn); cgraph_edge::rebuild_edges (); /* Some EH regions might become dead, see PR34608. If pass_cleanup_cfg isn't the first pass to happen with the new child, these dead EH edges might cause problems. Clean them up now. 
*/ if (flag_exceptions) { basic_block bb; bool changed = false; FOR_EACH_BB_FN (bb, cfun) changed |= gimple_purge_dead_eh_edges (bb); if (changed) cleanup_tree_cfg (); } if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP)) verify_loop_structure (); pop_cfun (); if (dump_file && !gimple_in_ssa_p (cfun)) { omp_any_child_fn_dumped = true; dump_function_header (dump_file, child_fn, dump_flags); dump_function_to_file (child_fn, dump_file, dump_flags); } adjust_context_and_scope (region, gimple_block (entry_stmt), child_fn); } /* Emit a library call to launch the offloading region, or do data transfers. */ tree t1, t2, t3, t4, depend, c, clauses; enum built_in_function start_ix; unsigned int flags_i = 0; switch (gimple_omp_target_kind (entry_stmt)) { case GF_OMP_TARGET_KIND_REGION: start_ix = BUILT_IN_GOMP_TARGET; break; case GF_OMP_TARGET_KIND_DATA: start_ix = BUILT_IN_GOMP_TARGET_DATA; break; case GF_OMP_TARGET_KIND_UPDATE: start_ix = BUILT_IN_GOMP_TARGET_UPDATE; break; case GF_OMP_TARGET_KIND_ENTER_DATA: start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA; break; case GF_OMP_TARGET_KIND_EXIT_DATA: start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA; flags_i |= GOMP_TARGET_FLAG_EXIT_DATA; break; case GF_OMP_TARGET_KIND_OACC_PARALLEL: case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_SERIAL: start_ix = BUILT_IN_GOACC_PARALLEL; break; case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: start_ix = BUILT_IN_GOACC_DATA_START; break; case GF_OMP_TARGET_KIND_OACC_UPDATE: start_ix = BUILT_IN_GOACC_UPDATE; break; case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA; break; case GF_OMP_TARGET_KIND_OACC_DECLARE: start_ix = BUILT_IN_GOACC_DECLARE; break; default: gcc_unreachable (); } clauses = gimple_omp_target_clauses (entry_stmt); tree device = NULL_TREE; location_t device_loc = UNKNOWN_LOCATION; tree goacc_flags = NULL_TREE; if (is_gimple_omp_oacc (entry_stmt)) { /* By default, no GOACC_FLAGs are 
set. */ goacc_flags = integer_zero_node; } else { c = omp_find_clause (clauses, OMP_CLAUSE_DEVICE); if (c) { device = OMP_CLAUSE_DEVICE_ID (c); device_loc = OMP_CLAUSE_LOCATION (c); } else { /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let runtime library choose). */ device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV); device_loc = gimple_location (entry_stmt); } c = omp_find_clause (clauses, OMP_CLAUSE_NOWAIT); if (c) flags_i |= GOMP_TARGET_FLAG_NOWAIT; } /* By default, there is no conditional. */ tree cond = NULL_TREE; c = omp_find_clause (clauses, OMP_CLAUSE_IF); if (c) cond = OMP_CLAUSE_IF_EXPR (c); /* If we found the clause 'if (cond)', build: OpenACC: goacc_flags = (cond ? goacc_flags : flags | GOACC_FLAG_HOST_FALLBACK) OpenMP: device = (cond ? device : GOMP_DEVICE_HOST_FALLBACK) */ if (cond) { tree *tp; if (is_gimple_omp_oacc (entry_stmt)) tp = &goacc_flags; else { /* Ensure 'device' is of the correct type. */ device = fold_convert_loc (device_loc, integer_type_node, device); tp = &device; } cond = gimple_boolify (cond); basic_block cond_bb, then_bb, else_bb; edge e; tree tmp_var; tmp_var = create_tmp_var (TREE_TYPE (*tp)); if (offloaded) e = split_block_after_labels (new_bb); else { gsi = gsi_last_nondebug_bb (new_bb); gsi_prev (&gsi); e = split_block (new_bb, gsi_stmt (gsi)); } cond_bb = e->src; new_bb = e->dest; remove_edge (e); then_bb = create_empty_bb (cond_bb); else_bb = create_empty_bb (then_bb); set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb); set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb); stmt = gimple_build_cond_empty (cond); gsi = gsi_last_bb (cond_bb); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (then_bb); stmt = gimple_build_assign (tmp_var, *tp); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (else_bb); if (is_gimple_omp_oacc (entry_stmt)) stmt = gimple_build_assign (tmp_var, BIT_IOR_EXPR, *tp, build_int_cst (integer_type_node, 
GOACC_FLAG_HOST_FALLBACK)); else stmt = gimple_build_assign (tmp_var, build_int_cst (integer_type_node, GOMP_DEVICE_HOST_FALLBACK)); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); add_bb_to_loop (then_bb, cond_bb->loop_father); add_bb_to_loop (else_bb, cond_bb->loop_father); make_edge (then_bb, new_bb, EDGE_FALLTHRU); make_edge (else_bb, new_bb, EDGE_FALLTHRU); *tp = tmp_var; gsi = gsi_last_nondebug_bb (new_bb); } else { gsi = gsi_last_nondebug_bb (new_bb); if (device != NULL_TREE) device = force_gimple_operand_gsi (&gsi, device, true, NULL_TREE, true, GSI_SAME_STMT); } t = gimple_omp_target_data_arg (entry_stmt); if (t == NULL) { t1 = size_zero_node; t2 = build_zero_cst (ptr_type_node); t3 = t2; t4 = t2; } else { t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1)))); t1 = size_binop (PLUS_EXPR, t1, size_int (1)); t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0)); t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1)); t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2)); } gimple *g; bool tagging = false; /* The maximum number used by any start_ix, without varargs. 
*/ auto_vec<tree, 11> args; if (is_gimple_omp_oacc (entry_stmt)) { tree goacc_flags_m = fold_build1 (GOACC_FLAGS_MARSHAL_OP, TREE_TYPE (goacc_flags), goacc_flags); goacc_flags_m = force_gimple_operand_gsi (&gsi, goacc_flags_m, true, NULL_TREE, true, GSI_SAME_STMT); args.quick_push (goacc_flags_m); } else args.quick_push (device); if (offloaded) args.quick_push (build_fold_addr_expr (child_fn)); args.quick_push (t1); args.quick_push (t2); args.quick_push (t3); args.quick_push (t4); switch (start_ix) { case BUILT_IN_GOACC_DATA_START: case BUILT_IN_GOACC_DECLARE: case BUILT_IN_GOMP_TARGET_DATA: break; case BUILT_IN_GOMP_TARGET: case BUILT_IN_GOMP_TARGET_UPDATE: case BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA: args.quick_push (build_int_cst (unsigned_type_node, flags_i)); c = omp_find_clause (clauses, OMP_CLAUSE_DEPEND); if (c) depend = OMP_CLAUSE_DECL (c); else depend = build_int_cst (ptr_type_node, 0); args.quick_push (depend); if (start_ix == BUILT_IN_GOMP_TARGET) args.quick_push (get_target_arguments (&gsi, entry_stmt)); break; case BUILT_IN_GOACC_PARALLEL: if (lookup_attribute ("oacc serial", DECL_ATTRIBUTES (child_fn)) != NULL) { tree dims = NULL_TREE; unsigned int ix; /* For serial constructs we set all dimensions to 1. */ for (ix = GOMP_DIM_MAX; ix--;) dims = tree_cons (NULL_TREE, integer_one_node, dims); oacc_replace_fn_attrib (child_fn, dims); } else oacc_set_fn_attrib (child_fn, clauses, &args); tagging = true; /* FALLTHRU */ case BUILT_IN_GOACC_ENTER_EXIT_DATA: case BUILT_IN_GOACC_UPDATE: { tree t_async = NULL_TREE; /* If present, use the value specified by the respective clause, making sure that is of the correct type. */ c = omp_find_clause (clauses, OMP_CLAUSE_ASYNC); if (c) t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c), integer_type_node, OMP_CLAUSE_ASYNC_EXPR (c)); else if (!tagging) /* Default values for t_async. 
*/ t_async = fold_convert_loc (gimple_location (entry_stmt), integer_type_node, build_int_cst (integer_type_node, GOMP_ASYNC_SYNC)); if (tagging && t_async) { unsigned HOST_WIDE_INT i_async = GOMP_LAUNCH_OP_MAX; if (TREE_CODE (t_async) == INTEGER_CST) { /* See if we can pack the async arg in to the tag's operand. */ i_async = TREE_INT_CST_LOW (t_async); if (i_async < GOMP_LAUNCH_OP_MAX) t_async = NULL_TREE; else i_async = GOMP_LAUNCH_OP_MAX; } args.safe_push (oacc_launch_pack (GOMP_LAUNCH_ASYNC, NULL_TREE, i_async)); } if (t_async) args.safe_push (force_gimple_operand_gsi (&gsi, t_async, true, NULL_TREE, true, GSI_SAME_STMT)); /* Save the argument index, and ... */ unsigned t_wait_idx = args.length (); unsigned num_waits = 0; c = omp_find_clause (clauses, OMP_CLAUSE_WAIT); if (!tagging || c) /* ... push a placeholder. */ args.safe_push (integer_zero_node); for (; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT) { tree arg = fold_convert_loc (OMP_CLAUSE_LOCATION (c), integer_type_node, OMP_CLAUSE_WAIT_EXPR (c)); arg = force_gimple_operand_gsi (&gsi, arg, true, NULL_TREE, true, GSI_SAME_STMT); args.safe_push (arg); num_waits++; } if (!tagging || num_waits) { tree len; /* Now that we know the number, update the placeholder. */ if (tagging) len = oacc_launch_pack (GOMP_LAUNCH_WAIT, NULL_TREE, num_waits); else len = build_int_cst (integer_type_node, num_waits); len = fold_convert_loc (gimple_location (entry_stmt), unsigned_type_node, len); args[t_wait_idx] = len; } } break; default: gcc_unreachable (); } if (tagging) /* Push terminal marker - zero. 
*/ args.safe_push (oacc_launch_pack (0, NULL_TREE, 0)); g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args); gimple_set_location (g, gimple_location (entry_stmt)); gsi_insert_before (&gsi, g, GSI_SAME_STMT); if (!offloaded) { g = gsi_stmt (gsi); gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET); gsi_remove (&gsi, true); } if (data_region && region->exit) { gsi = gsi_last_nondebug_bb (region->exit); g = gsi_stmt (gsi); gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN); gsi_remove (&gsi, true); } } /* Expand the parallel region tree rooted at REGION. Expansion proceeds in depth-first order. Innermost regions are expanded first. This way, parallel regions that require a new function to be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any internal dependencies in their body. */ static void expand_omp (struct omp_region *region) { omp_any_child_fn_dumped = false; while (region) { location_t saved_location; gimple *inner_stmt = NULL; /* First, determine whether this is a combined parallel+workshare region. */ if (region->type == GIMPLE_OMP_PARALLEL) determine_parallel_type (region); if (region->type == GIMPLE_OMP_FOR && gimple_omp_for_combined_p (last_stmt (region->entry))) inner_stmt = last_stmt (region->inner->entry); if (region->inner) expand_omp (region->inner); saved_location = input_location; if (gimple_has_location (last_stmt (region->entry))) input_location = gimple_location (last_stmt (region->entry)); switch (region->type) { case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_TASK: expand_omp_taskreg (region); break; case GIMPLE_OMP_FOR: expand_omp_for (region, inner_stmt); break; case GIMPLE_OMP_SECTIONS: expand_omp_sections (region); break; case GIMPLE_OMP_SECTION: /* Individual omp sections are handled together with their parent GIMPLE_OMP_SECTIONS region. 
*/ break; case GIMPLE_OMP_SINGLE: expand_omp_single (region); break; case GIMPLE_OMP_ORDERED: { gomp_ordered *ord_stmt = as_a <gomp_ordered *> (last_stmt (region->entry)); if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt), OMP_CLAUSE_DEPEND)) { /* We'll expand these when expanding corresponding worksharing region with ordered(n) clause. */ gcc_assert (region->outer && region->outer->type == GIMPLE_OMP_FOR); region->ord_stmt = ord_stmt; break; } } /* FALLTHRU */ case GIMPLE_OMP_MASTER: case GIMPLE_OMP_TASKGROUP: case GIMPLE_OMP_CRITICAL: case GIMPLE_OMP_TEAMS: expand_omp_synch (region); break; case GIMPLE_OMP_ATOMIC_LOAD: expand_omp_atomic (region); break; case GIMPLE_OMP_TARGET: expand_omp_target (region); break; default: gcc_unreachable (); } input_location = saved_location; region = region->next; } if (omp_any_child_fn_dumped) { if (dump_file) dump_function_header (dump_file, current_function_decl, dump_flags); omp_any_child_fn_dumped = false; } } /* Helper for build_omp_regions. Scan the dominator tree starting at block BB. PARENT is the region that contains BB. If SINGLE_TREE is true, the function ends once a single tree is built (otherwise, whole forest of OMP constructs may be built). */ static void build_omp_regions_1 (basic_block bb, struct omp_region *parent, bool single_tree) { gimple_stmt_iterator gsi; gimple *stmt; basic_block son; gsi = gsi_last_nondebug_bb (bb); if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi))) { struct omp_region *region; enum gimple_code code; stmt = gsi_stmt (gsi); code = gimple_code (stmt); if (code == GIMPLE_OMP_RETURN) { /* STMT is the return point out of region PARENT. Mark it as the exit point and make PARENT the immediately enclosing region. */ gcc_assert (parent); region = parent; region->exit = bb; parent = parent->outer; } else if (code == GIMPLE_OMP_ATOMIC_STORE) { /* GIMPLE_OMP_ATOMIC_STORE is analogous to GIMPLE_OMP_RETURN, but matches with GIMPLE_OMP_ATOMIC_LOAD. 
*/ gcc_assert (parent); gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD); region = parent; region->exit = bb; parent = parent->outer; } else if (code == GIMPLE_OMP_CONTINUE) { gcc_assert (parent); parent->cont = bb; } else if (code == GIMPLE_OMP_SECTIONS_SWITCH) { /* GIMPLE_OMP_SECTIONS_SWITCH is part of GIMPLE_OMP_SECTIONS, and we do nothing for it. */ } else { region = new_omp_region (bb, code, parent); /* Otherwise... */ if (code == GIMPLE_OMP_TARGET) { switch (gimple_omp_target_kind (stmt)) { case GF_OMP_TARGET_KIND_REGION: case GF_OMP_TARGET_KIND_DATA: case GF_OMP_TARGET_KIND_OACC_PARALLEL: case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_SERIAL: case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: break; case GF_OMP_TARGET_KIND_UPDATE: case GF_OMP_TARGET_KIND_ENTER_DATA: case GF_OMP_TARGET_KIND_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_UPDATE: case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_DECLARE: /* ..., other than for those stand-alone directives... */ region = NULL; break; default: gcc_unreachable (); } } else if (code == GIMPLE_OMP_ORDERED && omp_find_clause (gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt)), OMP_CLAUSE_DEPEND)) /* #pragma omp ordered depend is also just a stand-alone directive. */ region = NULL; else if (code == GIMPLE_OMP_TASK && gimple_omp_task_taskwait_p (stmt)) /* #pragma omp taskwait depend(...) is a stand-alone directive. */ region = NULL; /* ..., this directive becomes the parent for a new region. */ if (region) parent = region; } } if (single_tree && !parent) return; for (son = first_dom_son (CDI_DOMINATORS, bb); son; son = next_dom_son (CDI_DOMINATORS, son)) build_omp_regions_1 (son, parent, single_tree); } /* Builds the tree of OMP regions rooted at ROOT, storing it to root_omp_region. 
*/

static void
build_omp_regions_root (basic_block root)
{
  /* Build a single OMP region tree rooted at ROOT.  The global
     root_omp_region must be empty on entry and is filled in as a side
     effect of build_omp_regions_1.  */
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}

/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  omp_free_regions ();
}

/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  /* Region discovery walks the dominator tree, so make sure dominance
     information is up to date first.  */
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
}

/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();
  /* Nothing to do for functions with no OMP regions.  */
  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    verify_loop_structure ();
  /* Expansion rewired the CFG; clean it up before freeing the regions.  */
  cleanup_tree_cfg ();

  omp_free_regions ();

  return 0;
}

/* OMP expansion -- the default pass, run before creation of SSA form.
*/

namespace {

/* Pass descriptor for the default (pre-SSA) OMP expansion pass.  */
const pass_data pass_data_expand_omp =
{
  GIMPLE_PASS, /* type */
  "ompexp", /* name */
  OPTGROUP_OMP, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  PROP_gimple_eomp, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_expand_omp : public gimple_opt_pass
{
public:
  pass_expand_omp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_expand_omp, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      /* Only do real work when some OpenACC/OpenMP flavor is enabled and
	 no errors have been reported yet.  */
      bool gate = ((flag_openacc != 0 || flag_openmp != 0
		    || flag_openmp_simd != 0)
		   && !seen_error ());

      /* This pass always runs, to provide PROP_gimple_eomp.
	 But often, there is nothing to do.  */
      if (!gate)
	return 0;

      return execute_expand_omp ();
    }

}; // class pass_expand_omp

} // anon namespace

gimple_opt_pass *
make_pass_expand_omp (gcc::context *ctxt)
{
  return new pass_expand_omp (ctxt);
}

namespace {

/* Pass descriptor for the late (SSA-time) OMP expansion pass.  */
const pass_data pass_data_expand_omp_ssa =
{
  GIMPLE_PASS, /* type */
  "ompexpssa", /* name */
  OPTGROUP_OMP, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_cfg | PROP_ssa, /* properties_required */
  PROP_gimple_eomp, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
};

class pass_expand_omp_ssa : public gimple_opt_pass
{
public:
  pass_expand_omp_ssa (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun)
    {
      /* Run only if OMP expansion has not already been performed.  */
      return !(fun->curr_properties & PROP_gimple_eomp);
    }
  virtual unsigned int execute (function *) { return execute_expand_omp (); }
  opt_pass * clone () { return new pass_expand_omp_ssa (m_ctxt); }

}; // class pass_expand_omp_ssa

} // anon namespace

gimple_opt_pass *
make_pass_expand_omp_ssa (gcc::context *ctxt)
{
  return new pass_expand_omp_ssa (ctxt);
}

/* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
   GIMPLE_* codes.
*/

bool
omp_make_gimple_edges (basic_block bb, struct omp_region **region,
		       int *region_idx)
{
  gimple *last = last_stmt (bb);
  enum gimple_code code = gimple_code (last);
  struct omp_region *cur_region = *region;
  bool fallthru = false;

  switch (code)
    {
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_SECTION:
      /* These directives open a new region; control falls through into
	 the body.  */
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      break;

    case GIMPLE_OMP_TASK:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      /* A taskwait-only task is a stand-alone directive, so close its
	 region immediately.  */
      if (gimple_omp_task_taskwait_p (last))
	cur_region = cur_region->outer;
      break;

    case GIMPLE_OMP_ORDERED:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      /* "#pragma omp ordered depend" is likewise stand-alone: pop the
	 region that was just opened.  */
      if (omp_find_clause (gimple_omp_ordered_clauses
			     (as_a <gomp_ordered *> (last)),
			   OMP_CLAUSE_DEPEND))
	cur_region = cur_region->outer;
      break;

    case GIMPLE_OMP_TARGET:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      switch (gimple_omp_target_kind (last))
	{
	case GF_OMP_TARGET_KIND_REGION:
	case GF_OMP_TARGET_KIND_DATA:
	case GF_OMP_TARGET_KIND_OACC_PARALLEL:
	case GF_OMP_TARGET_KIND_OACC_KERNELS:
	case GF_OMP_TARGET_KIND_OACC_SERIAL:
	case GF_OMP_TARGET_KIND_OACC_DATA:
	case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
	  break;
	case GF_OMP_TARGET_KIND_UPDATE:
	case GF_OMP_TARGET_KIND_ENTER_DATA:
	case GF_OMP_TARGET_KIND_EXIT_DATA:
	case GF_OMP_TARGET_KIND_OACC_UPDATE:
	case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
	case GF_OMP_TARGET_KIND_OACC_DECLARE:
	  /* Stand-alone target kinds have no body; pop the region again.  */
	  cur_region = cur_region->outer;
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case GIMPLE_OMP_SECTIONS:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      break;

    case GIMPLE_OMP_SECTIONS_SWITCH:
      /* Edges for the switch are wired up when the matching
	 GIMPLE_OMP_CONTINUE of the sections region is reached below.  */
      fallthru = false;
      break;

    case GIMPLE_OMP_ATOMIC_LOAD:
    case GIMPLE_OMP_ATOMIC_STORE:
      fallthru = true;
      break;

    case GIMPLE_OMP_RETURN:
      /* In the case of a GIMPLE_OMP_SECTION, the edge will go
	 somewhere other than the next block.  This will be
	 created later.  */
      cur_region->exit = bb;
      if (cur_region->type == GIMPLE_OMP_TASK)
	/* Add an edge corresponding to not scheduling the task
	   immediately.  */
	make_edge (cur_region->entry, bb, EDGE_ABNORMAL);
      fallthru = cur_region->type != GIMPLE_OMP_SECTION;
      /* The region is finished; return to its parent.  */
      cur_region = cur_region->outer;
      break;

    case GIMPLE_OMP_CONTINUE:
      cur_region->cont = bb;
      switch (cur_region->type)
	{
	case GIMPLE_OMP_FOR:
	  /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
	     succs edges as abnormal to prevent splitting
	     them.  */
	  single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
	  /* Make the loopback edge.  */
	  make_edge (bb, single_succ (cur_region->entry),
		     EDGE_ABNORMAL);

	  /* Create an edge from GIMPLE_OMP_FOR to exit, which
	     corresponds to the case that the body of the loop
	     is not executed at all.  */
	  make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
	  make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
	  fallthru = false;
	  break;

	case GIMPLE_OMP_SECTIONS:
	  /* Wire up the edges into and out of the nested sections.  */
	  {
	    basic_block switch_bb = single_succ (cur_region->entry);

	    struct omp_region *i;
	    for (i = cur_region->inner; i ; i = i->next)
	      {
		gcc_assert (i->type == GIMPLE_OMP_SECTION);
		make_edge (switch_bb, i->entry, 0);
		make_edge (i->exit, bb, EDGE_FALLTHRU);
	      }

	    /* Make the loopback edge to the block with
	       GIMPLE_OMP_SECTIONS_SWITCH.  */
	    make_edge (bb, switch_bb, 0);

	    /* Make the edge from the switch to exit.  */
	    make_edge (switch_bb, bb->next_bb, 0);
	    fallthru = false;
	  }
	  break;

	case GIMPLE_OMP_TASK:
	  fallthru = true;
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }

  if (*region != cur_region)
    {
      *region = cur_region;
      /* Report the entry block index of the region now in effect (0 when
	 we have left all regions) back to the caller.  */
      if (cur_region)
	*region_idx = cur_region->entry->index;
      else
	*region_idx = 0;
    }

  return fallthru;
}
bcnn_utils.h
/* * Copyright (c) 2016-present Jean-Noel Braun. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef BCNN_UTILS_H #define BCNN_UTILS_H #include "bcnn/bcnn.h" /* OpenMP */ #ifdef BCNN_USE_OPENMP #include <omp.h> #endif /* Cuda include */ #ifdef BCNN_USE_CUDA #include <cublas_v2.h> #include <cuda.h> #include <cuda_runtime.h> #include <curand.h> #include <driver_types.h> // cuda driver types #ifdef BCNN_USE_CUDNN #include <cudnn.h> #endif #endif #include <stdio.h> #include <stdlib.h> // RAND_MAX #include <time.h> #ifdef __cplusplus extern "C" { #endif typedef struct { int state; float r; } bcnn_gauss_gen; typedef struct { bcnn_log_callback fct; bcnn_log_level lvl; } bcnn_log_context; /** Convenient macros */ #define BCNN_CHECK(exp, err) \ do { \ if (!(exp)) { \ return (err); \ } \ } while (0) #define BCNN_CHECK_AND_LOG(ctx, exp, err, fmt, ...) 
\ do { \ if (!(exp)) { \ bcnn_log((ctx), BCNN_LOG_ERROR, (fmt), ##__VA_ARGS__); \ return (err); \ } \ } while (0) #define BCNN_CHECK_STATUS(s) \ do { \ bcnn_status ret = (s); \ if ((ret) != BCNN_SUCCESS) { \ return (ret); \ } \ } while (0) #define BCNN_ERROR(ctx, err, fmt, ...) \ do { \ bcnn_log((ctx), BCNN_LOG_ERROR, (fmt), ##__VA_ARGS__); \ return (err); \ } while (0) #define BCNN_INFO(ctx, fmt, ...) \ do { \ bcnn_log((ctx), BCNN_LOG_INFO, (fmt), ##__VA_ARGS__); \ } while (0) #define BCNN_WARNING(ctx, fmt, ...) \ do { \ bcnn_log((ctx), BCNN_LOG_WARNING, (fmt), ##__VA_ARGS__); \ } while (0) #define BCNN_PARSE_CLEANUP(l, t, n) \ do { \ for (int i = 0; i < (n); ++i) { \ bh_free((t[i])); \ } \ bh_free(t); \ bh_free(l); \ } while (0) float bcnn_rng_gaussian(bcnn_gauss_gen *g); void bcnn_log(bcnn_log_context ctx, bcnn_log_level level, const char *fmt, ...); void bcnn_draw_color_box(unsigned char *img, int w_img, int h_img, float cx_box, float cy_box, float w_box, float h_box, unsigned char color[3]); static inline int bcnn_rand_between(int min, int max) { if (min > max) { return 0.f; } return (int)(((float)rand() / RAND_MAX * (max - min)) + min + 0.5f); } static inline int bcnn_omp_get_num_threads() { #ifdef BCNN_USE_OPENMP int n = 0; #pragma omp parallel reduction(+ : n) n += 1; return n; #else return 1; #endif } static inline const char *bcnn_act2str(bcnn_activation a) { switch (a) { case BCNN_ACT_TANH: return "Tanh"; case BCNN_ACT_RELU: return "ReLU"; case BCNN_ACT_RAMP: return "Ramp"; case BCNN_ACT_SOFTPLUS: return "Softplus"; case BCNN_ACT_LRELU: return "Leaky-ReLU"; case BCNN_ACT_ABS: return "AbsVal"; case BCNN_ACT_CLAMP: return "Clamp"; case BCNN_ACT_PRELU: return "PReLU"; default: return "None"; } } #ifdef BCNN_USE_CUDA #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200) #define BCNN_CUDA_THREADS 1024 #else #define BCNN_CUDA_THREADS 512 #endif static inline int bcnn_cuda_blocks(const int n) { return (n - 1) / (BCNN_CUDA_THREADS) + 1; } cublasHandle_t 
bcnn_cublas_handle(); #define bcnn_cuda_check(RET) \ { \ if ((RET) != cudaSuccess) { \ fprintf(stderr, "[ERROR] [CUDA] %s\n", cudaGetErrorString((RET))); \ exit((RET)); \ } \ } #define bcnn_cublas_check(RET) \ { \ if ((RET) != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "[ERROR] [CUBLAS] %d\n", (int)(RET)); \ exit((RET)); \ } \ } #define bcnn_curand_check(RET) \ { \ if ((RET) != CURAND_STATUS_SUCCESS) { \ fprintf(stderr, "[ERROR] [CURAND] %d\n", (int)(RET)); \ exit((RET)); \ } \ } int *bcnn_cuda_malloc_i32(int n); float *bcnn_cuda_malloc_f32(int n); float *bcnn_cuda_memcpy_f32(float *x, int n); void bcnn_cuda_memcpy_f32_noalloc(float *x, float *x_gpu, int n); void bcnn_cuda_fill_with_random(float *x_gpu, int n); void bcnn_cuda_free(void *x_gpu); void bcnn_cuda_memcpy_host2dev(float *x_gpu, float *x, int n); void bcnn_cuda_memcpy_dev2host(float *x_gpu, float *x, int n); void bcnn_cuda_set_device(int id); #ifdef BCNN_USE_CUDNN #define bcnn_cudnn_check(RET) \ { \ if ((RET) != CUDNN_STATUS_SUCCESS) { \ fprintf(stderr, "[ERROR] [CUDNN] %s\n", \ cudnnGetErrorString((RET))); \ exit((RET)); \ } \ } cudnnHandle_t bcnn_cudnn_handle(); #endif // BCNN_USE_CUDNN #endif // BCNN_USE_CUDA #ifdef __cplusplus } #endif #endif // BCNN_UTILS_H
pace.h
// // Created by sylwester on 4/16/20. // #ifndef ALGORITHMSPROJECT_PACE_H #define ALGORITHMSPROJECT_PACE_H #include "graphs/treewidth/id_func.h" #include "graphs/treewidth/list_graph.h" #include "graphs/treewidth/multi_arc.h" #include "graphs/treewidth/sort_arc.h" #include "graphs/treewidth/chain.h" #include "graphs/treewidth/flow_cutter.h" #include "graphs/treewidth/greedy_order.h" #include "graphs/treewidth/node_flow_cutter.h" #include "graphs/treewidth/contraction_graph.h" #include "graphs/treewidth/cch_order.h" #include "graphs/treewidth/tree_decomposition.h" #include "graphs/treewidth/separator.h" #include "graphs/treewidth/TreewidthDecomposition.h" #include <limits> #include <signal.h> #include <stdlib.h> #include <string.h> #include <string> #include <sstream> #ifdef PARALLELIZE #include <omp.h> #include <atomic> #endif #include <sys/time.h> #include <unistd.h> using namespace std; struct TREEWIDTH { ArrayIDIDFunc tail, head; const char *volatile best_decomposition = 0; int best_bag_size = numeric_limits<int>::max(); void ignore_return_value(int) {} int compute_max_bag_size(const ArrayIDIDFunc &order) { auto inv_order = inverse_permutation(order); int current_tail = -1; int current_tail_up_deg = 0; int max_up_deg = 0; compute_chordal_supergraph( chain(tail, inv_order), chain(head, inv_order), [&](int x, int y) { if (current_tail != x) { current_tail = x; max_to(max_up_deg, current_tail_up_deg); current_tail_up_deg = 0; } ++current_tail_up_deg; } ); return max_up_deg + 1; } unsigned long long get_milli_time() { struct timeval tv; gettimeofday(&tv, NULL); return (unsigned long long) (tv.tv_sec) * 1000 + (unsigned long long) (tv.tv_usec) / 1000; } const char *compute_decomposition(const ArrayIDIDFunc &order) { ostringstream out; print_tree_decompostion(out, tail, head, move(order)); char *buf = new char[out.str().length() + 1]; memcpy(buf, out.str().c_str(), out.str().length() + 1); return buf; } void test_new_order(ArrayIDIDFunc order) { // cerr << "testing 
new order" << endl; int x = compute_max_bag_size(order); // cerr << "max bag size computed, x = " << x << endl; #ifdef PARALLELIZE #pragma omp critical #endif { if (x < best_bag_size) { best_bag_size = x; const char *old_decomposition = best_decomposition; // string s(old_decomposition); // DEBUG( s ); best_decomposition = compute_decomposition(move(order)); // string s = string(best_decomposition); // DEBUG(s); delete[]old_decomposition; { string msg = "c status " + to_string(best_bag_size) + " " + to_string(get_milli_time()) + "\n"; // ignore_return_value(write(STDOUT_FILENO, msg.data(), msg.length())); } } } // cerr << "testing new order" << endl; } // char no_decomposition_message[] = "c info programm was aborted before any decomposition was computed\n"; string no_decomposition_message = "c info programm was aborted before any decomposition was computed\n"; #ifdef PARALLELIZE volatile atomic_flag only_one_thread_in_signal_handler = ATOMIC_FLAG_INIT; #endif void signal_handler(int) { #ifdef PARALLELIZE while (only_one_thread_in_signal_handler.test_and_set()) {} #endif const char *x = best_decomposition; if (x != 0) ignore_return_value(write(STDOUT_FILENO, x, strlen(x))); else // ignore_return_value(write(STDOUT_FILENO, no_decomposition_message, sizeof(no_decomposition_message))); // original ignore_return_value(write(STDOUT_FILENO, no_decomposition_message.c_str(), sizeof(no_decomposition_message.c_str()))); _Exit(EXIT_SUCCESS); } /** * * @param V graph for which to construct decomposition * @param maxCnt - maximal number of times that a new order does not improve the solution * @param tle true if program received sigterm * @param argc * @return */ TreewidthDecomposition main(VVI& V, int maxCnt, volatile sig_atomic_t& tle/*, int argc=1, char *argv[]*/) { // signal(SIGTERM, signal_handler); // signal(SIGINT, signal_handler); // signal(SIGSEGV, signal_handler); // swap(no_decomposition_message, "c info programm was aborted before any decomposition was 
computed\n"); stringstream passToCin; int N = V.size(); int E = 0; for(VI& v : V) E += v.size(); E >>= 1; passToCin << "p tw " << N << " " << E << endl; for( int i=0; i<N; i++ ) for( int d : V[i] ) if(d>i) passToCin << i+1 << " " << d+1 << endl; const char* cstr = passToCin.str().c_str(); // cerr << "str(): " << passToCin.str() << endl; ignore_return_value( write( STDIN_FILENO, cstr, sizeof(cstr) ) ); /*double startTime = clock(); function<bool()> localLimitExceeded = [&startTime, &maxTimeMilis](){ double now = clock(); double milis = ( now - startTime ) / (double)CLOCKS_PER_SEC; milis *= 1000; // DEBUG(startTime); // DEBUG(now); // DEBUG(milis); // DEBUG(maxTimeMilis); // // cerr << "returning: " << (milis > maxTimeMilis) << endl; // ENDL(1); // return (milis > maxTimeMilis); return true; };*/ try { { string file_name = "-"; auto g = uncached_load_pace_graph(file_name, passToCin); tail = std::move(g.tail); head = std::move(g.head); } int random_seed = 0; //#ifdef PARALLELIZE //#pragma omp parallel //#endif { try { //#ifdef PARALLELIZE //#pragma omp sections nowait //#endif { // test_new_order( compute_greedy_min_degree_order(tail, head) ); if(!tle ){ // cerr << "testing compute_greedy_min_degree_order" << endl; auto ord = compute_greedy_min_degree_order(tail, head); if(!tle ) test_new_order(ord); // cerr << "\tfinished" << endl; } //#ifdef PARALLELIZE //#pragma omp section //#endif // test_new_order(compute_greedy_min_shortcut_order(tail, head)); if(!tle ){ // cerr << "testing compute_greedy_min_shortcut_order" << endl; auto ord = compute_greedy_min_shortcut_order(tail, head); if(!tle ) test_new_order(ord); // cerr << "\tfinished" << endl; } } std::minstd_rand rand_gen; rand_gen.seed( random_seed //#ifdef PARALLELIZE // + omp_get_thread_num() //#endif ); flow_cutter::Config config; config.cutter_count = 1; config.random_seed = rand_gen(); bool improved = false; int cnt = 0; const int MAX_CNT = maxCnt; auto extractTW = [ =,&V ](){ if( best_decomposition == 0 ) 
return (int)V.size(); istringstream res( best_decomposition ); int tw; string empty; res >> empty >> empty >> empty >> tw; return tw; }; int prevTW = extractTW(); // DEBUG(prevTW); // cerr << endl; for (int i = 0;; ++i) { // cerr << "\rcnt: " << cnt << flush; config.random_seed = rand_gen(); if (i % 32 == 0) ++config.cutter_count; // test_new_order( cch_order::compute_cch_graph_order(tail, head, flow_cutter::ComputeSeparator(config)) ); ArrayIDIDFunc ord; if(!tle ) ord = cch_order::compute_cch_graph_order(tail, head, flow_cutter::ComputeSeparator(config)); if(!tle ) test_new_order( ord ); int newTW = extractTW(); // DEBUG(prevTW); // DEBUG(newTW); if( newTW < prevTW ){ improved = true; cnt=0; prevTW = newTW; }else{ improved = false; cnt++; } if(tle || cnt > MAX_CNT ) break; } // string s(best_decomposition); // DEBUG(s); if( best_decomposition == 0 ) return TreewidthDecomposition( V, {}, {} ); // no treewidth decomposition, returning empty istringstream res( best_decomposition ); string t; int DECOMP_SIZE = -1, TW = -1, N = -1; VVI BAGS; VVI STRUCTURE; while( getline(res,t) ){ // cerr << "t: " << t << endl; if(t[0] == 'c') continue; stringstream line(t); string empty; if( t[0] == 's' ){ line >> empty >> empty >> DECOMP_SIZE >> TW >> N; BAGS = STRUCTURE = VVI( DECOMP_SIZE ); }else if( t[0] == 'b' ){ int a, id; line >> empty >> id; while( line >> a ) BAGS[id-1].push_back(a-1); }else{ int a,b; line >> a >> b; STRUCTURE[a-1].push_back(b-1); STRUCTURE[b-1].push_back(a-1); } } // DEBUG(BAGS); return TreewidthDecomposition(V, STRUCTURE, BAGS ); // } catch (...) { cerr << "error1" << endl; return TreewidthDecomposition( V, {}, {} ); signal_handler(0); } } } catch (...) { cerr << "error2" << endl; return TreewidthDecomposition( V, {}, {} ); signal_handler(0); } } }; #endif //ALGORITHMSPROJECT_PACE_H
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-2,3)),ceild(16*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(8*t1+Ny+13,24)),floord(16*t2+Ny+12,24)),floord(16*t1-16*t2+Nz+Ny+11,24));t3++) { for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32)),ceild(24*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(8*t1+Nx+13,32)),floord(16*t2+Nx+12,32)),floord(24*t3+Nx+20,32)),floord(16*t1-16*t2+Nz+Nx+11,32));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),24*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),24*t3+22),32*t4+30),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(32*t4,t5+1); ubv=min(32*t4+31,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } 
} } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
SSE41search.c
#include "SSE41search.h"

// CPU search using SSE instrucions and Score Profile technique
//
// Computes one alignment score per (query, vectorized-DB-sequence) pair with
// gap-affine scoring (open_gap/extend_gap) using SSE4.1 intrinsics, 16 DB
// lanes at a time. Scores are first computed in saturating 8-bit arithmetic;
// lanes that saturate are recomputed in 16-bit, and then 32-bit, precision.
// Results (bias-corrected to plain ints) land in `scores`; elapsed wall time
// in *workTime.
// NOTE(review): sequence/displacement layouts (lengths sorted ascending,
// lane-interleaved DB blocks) are assumed from the preprocessing step —
// confirm against the builder of vect_sequences_db.
void search_sse41_sp (char * query_sequences, unsigned short int * query_sequences_lengths,
	unsigned long int query_sequences_count, unsigned int * query_disp, char * vect_sequences_db,
	unsigned short int * vect_sequences_db_lengths, unsigned short int * vect_sequences_db_blocks,
	unsigned long int vect_sequences_db_count, unsigned long int * vect_sequences_db_disp, char * submat,
	int open_gap, int extend_gap, int n_threads, int block_size, int * scores, double * workTime){

	long int i, j, k;
	double tick;

	char *a, * b;
	unsigned int * a_disp;
	unsigned long int * b_disp = NULL;
	unsigned short int * m, *n, *nbbs, sequences_db_max_length, query_sequences_max_length;

	// Short aliases for the query (a/m/a_disp) and DB (b/n/nbbs/b_disp) arrays.
	a = query_sequences;
	m = query_sequences_lengths;
	a_disp = query_disp;
	// Lengths arrays are used as: last entry = maximum length (see NOTE above).
	query_sequences_max_length = query_sequences_lengths[query_sequences_count-1];
	sequences_db_max_length = vect_sequences_db_lengths[vect_sequences_db_count-1];
	b = vect_sequences_db;
	n = vect_sequences_db_lengths;
	nbbs = vect_sequences_db_blocks;
	b_disp = vect_sequences_db_disp;

	tick = dwalltime();

	#pragma omp parallel default(none) shared(block_size, a, b, n, nbbs, m, a_disp, b_disp, submat, scores, query_sequences_count, vect_sequences_db_count, open_gap, extend_gap, sequences_db_max_length, query_sequences_max_length) num_threads(n_threads)
	{

		__m128i *row1, *row2, *maxCol, *maxRow, *lastCol, * ptr_scores, *tmp;
		__m128i*ptr_scoreProfile1, *ptr_scoreProfile2, *ptr_scoreProfile3, *ptr_scoreProfile4;
		char * ptr_a, * ptr_b, * scoreProfile;

		__declspec(align(MEMALIGN)) __m128i score, auxBlosum[2], auxLastCol, b_values;
		__declspec(align(MEMALIGN)) __m128i current1, current2, current3, current4, previous2, previous3, previous4;
		__declspec(align(MEMALIGN)) __m128i aux0, aux1, aux2, aux3, aux4, aux5, aux6, aux7, aux8;
		// Gap penalties replicated per lane for each working precision.
		__declspec(align(MEMALIGN)) __m128i vextend_gap_epi8 = _mm_set1_epi8(extend_gap), vopen_extend_gap_epi8 = _mm_set1_epi8(open_gap+extend_gap), vzero_epi8 = _mm_set1_epi8(0);
		__declspec(align(MEMALIGN)) __m128i vextend_gap_epi16 = _mm_set1_epi16(extend_gap), vopen_extend_gap_epi16 = _mm_set1_epi16(open_gap+extend_gap), vzero_epi16 = _mm_set1_epi16(0);
		__declspec(align(MEMALIGN)) __m128i vextend_gap_epi32 = _mm_set1_epi32(extend_gap), vopen_extend_gap_epi32 = _mm_set1_epi32(open_gap+extend_gap), vzero_epi32 = _mm_set1_epi32(0);
		// SP (constants used while building the score profile via pshufb)
		__declspec(align(MEMALIGN)) __m128i v15 = _mm_set1_epi8(15), v16 = _mm_set1_epi8(16), vneg32 = _mm_set1_epi8(-32);
		// overflow (saturation sentinels per precision)
		__declspec(align(MEMALIGN)) __m128i v127 = _mm_set1_epi8(127), v32767 = _mm_set1_epi16(32767);
		// bias (scores are kept biased by -128 / -32768 and corrected on store)
		__declspec(align(MEMALIGN)) __m128i v128 = _mm_set1_epi32(128), v32768 = _mm_set1_epi32(32768);
		unsigned int i, j, ii, jj, k, disp_1, disp_2, disp_3, disp_4, disp_5, dim1, dim2, nbb;
		unsigned long int t, s, q;
		int overflow_flag, bb1, bb1_start, bb1_end, bb2, bb2_start, bb2_end;

		// allocate memory for auxiliary buffers
		row1 = (__m128i *) _mm_malloc((block_size+1)*sizeof(__m128i), MEMALIGN);
		row2 = (__m128i *) _mm_malloc((block_size+1)*sizeof(__m128i), MEMALIGN);
		maxCol = (__m128i *) _mm_malloc((block_size+1)*sizeof(__m128i), MEMALIGN);
		maxRow = (__m128i *) _mm_malloc((query_sequences_max_length)*sizeof(__m128i), MEMALIGN);
		lastCol = (__m128i *) _mm_malloc((query_sequences_max_length)*sizeof(__m128i), MEMALIGN);
		scoreProfile = (char *) _mm_malloc((SUBMAT_ROWS_x_SSE_INT8_VECTOR_LENGTH*block_size)*sizeof(char), MEMALIGN);

		// calculate alignment score
		#pragma omp for schedule(dynamic) nowait
		for (t=0; t< query_sequences_count*vect_sequences_db_count; t++) {

			// Map flat task index t to (query q, DB vector s), iterating both
			// from the last (longest) entries backwards.
			q = (query_sequences_count-1) - (t % query_sequences_count);
			s = (vect_sequences_db_count-1) - (t / query_sequences_count);

			ptr_a = a + a_disp[q];
			ptr_b = b + b_disp[s];
			ptr_scores = (__m128i *) (scores + (q*vect_sequences_db_count+s)*SSE_INT8_VECTOR_LENGTH);

			// caluclate number of blocks
			nbb = nbbs[s];

			// ---------------- 8-bit pass (biased by -128, saturating) -------

			// init buffers
			#pragma unroll(SSE_UNROLL_COUNT)
			for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi8(-128);
			#pragma unroll(SSE_UNROLL_COUNT)
			for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi8(-128);

			// set score to 0 (biased zero = -128)
			score = _mm_set1_epi8(-128);

			for (k=0; k < nbb; k++){

				// calculate dim1 (columns in this DB block)
				disp_4 = k*block_size;
				dim1 = n[s]-disp_4;
				dim1 = (block_size < dim1 ? block_size : dim1);
				// calculate dim2 (full groups of DB_SEQ_LEN_MULT columns)
				dim2 = dim1 / DB_SEQ_LEN_MULT;

				// calculate a[i] displacement
				disp_1 = dim1*SSE_INT8_VECTOR_LENGTH;

				// init buffers
				#pragma unroll(SSE_UNROLL_COUNT)
				for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi8(-128);
				#pragma unroll(SSE_UNROLL_COUNT)
				for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi8(-128);
				auxLastCol = _mm_set1_epi8(-128);

				// build score profile: per column, substitution scores for every
				// residue, assembled by two 16-entry pshufb table lookups.
				for (i=0; i< dim1 ;i++ ) {
					// indexes
					b_values =  _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*SSE_INT8_VECTOR_LENGTH));
					// indexes >= 16
					aux1 = _mm_sub_epi8(b_values, v16);
					// indexes < 16
					aux2 = _mm_cmpgt_epi8(b_values,v15);
					aux3 = _mm_and_si128(aux2,vneg32);
					aux4 = _mm_add_epi8(b_values,aux3);
					ptr_scoreProfile1 = (__m128i*)(scoreProfile) + i;
					#pragma unroll
					for (j=0; j< SUBMAT_ROWS-1; j++) {
						tmp = (__m128i *) (submat + j*SUBMAT_COLS);
						auxBlosum[0] = _mm_load_si128(tmp);
						auxBlosum[1] = _mm_load_si128(tmp+1);
						aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4);
						aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1);
						aux7 = _mm_add_epi8(aux5,  aux6);
						_mm_store_si128(ptr_scoreProfile1+j*dim1, aux7);
					}
					_mm_store_si128(ptr_scoreProfile1+(SUBMAT_ROWS-1)*dim1,  vzero_epi8);
				}

				// DP over QUERY_SEQ_LEN_MULT (=4) query rows at a time.
				for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){

					// update row[0] with lastCol[i-1]
					row1[0] = lastCol[i];
					previous2 = lastCol[i+1];
					previous3 = lastCol[i+2];
					previous4 = lastCol[i+3];
					// calculate score profile displacement
					ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1);
					ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1);
					ptr_scoreProfile3 = (__m128i *)(scoreProfile+((int)(ptr_a[i+2]))*disp_1);
					ptr_scoreProfile4 = (__m128i *)(scoreProfile+((int)(ptr_a[i+3]))*disp_1);
					// store maxRow in auxiliars
					aux1 = maxRow[i];
					aux2 = maxRow[i+1];
					aux3 = maxRow[i+2];
					aux4 = maxRow[i+3];

					for (ii=0; ii<dim2 ; ii++) {

						#pragma unroll(DB_SEQ_LEN_MULT)
						for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT;  jj++, j++) {
							//calcuate the diagonal value
							current1 =  _mm_adds_epi8(row1[j-1], _mm_load_si128(ptr_scoreProfile1+(j-1)));
							// calculate current1 max value
							current1 = _mm_max_epi8(current1, aux1);
							current1 = _mm_max_epi8(current1, maxCol[j]);
							//current1 = _mm_max_epi8(current1, vzero_epi8);
							// update maxRow and maxCol
							aux1 = _mm_subs_epi8(aux1, vextend_gap_epi8);
							maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8);
							aux0 = _mm_subs_epi8(current1, vopen_extend_gap_epi8);
							aux1 = _mm_max_epi8(aux1, aux0);
							maxCol[j] =  _mm_max_epi8(maxCol[j], aux0);
							// update max score
							score = _mm_max_epi8(score,current1);

							//calcuate the diagonal value
							current2 =  _mm_adds_epi8(previous2, _mm_load_si128(ptr_scoreProfile2+(j-1)));
							// update previous
							previous2 = current1;
							// calculate current2 max value
							current2 = _mm_max_epi8(current2, aux2);
							current2 = _mm_max_epi8(current2, maxCol[j]);
							//current2 = _mm_max_epi8(current2, vzero_epi8);
							// update maxRow and maxCol
							aux2 = _mm_subs_epi8(aux2, vextend_gap_epi8);
							maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8);
							aux0 = _mm_subs_epi8(current2, vopen_extend_gap_epi8);
							aux2 = _mm_max_epi8(aux2, aux0);
							maxCol[j] =  _mm_max_epi8(maxCol[j], aux0);
							// update max score
							score = _mm_max_epi8(score,current2);

							//calcuate the diagonal value
							current3 =  _mm_adds_epi8(previous3, _mm_load_si128(ptr_scoreProfile3+(j-1)));
							// update previous
							previous3 = current2;
							// calculate current3 max value
							current3 = _mm_max_epi8(current3, aux3);
							current3 = _mm_max_epi8(current3, maxCol[j]);
							//current3 = _mm_max_epi8(current3, vzero_epi8);
							// update maxRow and maxCol
							aux3 = _mm_subs_epi8(aux3, vextend_gap_epi8);
							maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8);
							aux0 = _mm_subs_epi8(current3, vopen_extend_gap_epi8);
							aux3 = _mm_max_epi8(aux3, aux0);
							maxCol[j] =  _mm_max_epi8(maxCol[j], aux0);
							// update max score
							score = _mm_max_epi8(score,current3);

							//calcuate the diagonal value
							current4 =  _mm_adds_epi8(previous4, _mm_load_si128(ptr_scoreProfile4+(j-1)));
							// update previous
							previous4 = current3;
							// calculate current4 max value
							current4 = _mm_max_epi8(current4, aux4);
							current4 = _mm_max_epi8(current4, maxCol[j]);
							//current4 = _mm_max_epi8(current4, vzero_epi8);
							// update maxRow and maxCol
							aux4 = _mm_subs_epi8(aux4, vextend_gap_epi8);
							maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8);
							aux0 = _mm_subs_epi8(current4, vopen_extend_gap_epi8);
							aux4 = _mm_max_epi8(aux4, aux0);
							maxCol[j] =  _mm_max_epi8(maxCol[j], aux0);
							// update max score
							score = _mm_max_epi8(score,current4);

							// update row buffer
							row2[j] = current4;
						}
					}
					// update maxRow
					maxRow[i] = aux1;
					maxRow[i+1] = aux2;
					maxRow[i+2] = aux3;
					maxRow[i+3] = aux4;
					// update lastCol
					lastCol[i] = auxLastCol;
					lastCol[i+1] = current1;
					lastCol[i+2] = current2;
					lastCol[i+3] = current3;
					auxLastCol = current4;
					// swap buffers
					tmp = row1;
					row1 = row2;
					row2 = tmp;
				}
			}

			// store max value (un-bias: widen 8->32 bit and add 128)
			aux1 = _mm_add_epi32(_mm_cvtepi8_epi32(score),v128);
			_mm_store_si128 (ptr_scores,aux1);
			aux1 = _mm_add_epi32(_mm_cvtepi8_epi32(_mm_srli_si128(score,4)),v128);
			_mm_store_si128 (ptr_scores+1,aux1);
			aux1 = _mm_add_epi32(_mm_cvtepi8_epi32(_mm_srli_si128(score,8)),v128);
			_mm_store_si128 (ptr_scores+2,aux1);
			aux1 = _mm_add_epi32(_mm_cvtepi8_epi32(_mm_srli_si128(score,12)),v128);
			_mm_store_si128 (ptr_scores+3,aux1);

			// overflow detection (any lane saturated at +127?)
			aux1 = _mm_cmpeq_epi8(score,v127);
			overflow_flag =  _mm_test_all_zeros(aux1,v127);

			// if overflow
			if (overflow_flag == 0){

				// detect if overflow occurred in low-half, high-half or both halves
				aux1 = _mm_cmpeq_epi8(_mm_slli_si128(score,8),v127);
				bb1_start =  _mm_test_all_zeros(aux1,v127);
				aux1 = _mm_cmpeq_epi8(_mm_srli_si128(score,8),v127);
				bb1_end = 2 - _mm_test_all_zeros(aux1,v127);

				// ---------- 16-bit pass over saturated 8-lane halves --------
				// recalculate using 16-bit signed integer precision
				for (bb1=bb1_start; bb1<bb1_end ; bb1++){

					// init buffers
					#pragma unroll(SSE_UNROLL_COUNT)
					for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi16(-32768);
					#pragma unroll(SSE_UNROLL_COUNT)
					for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi16(-32768);

					// set score to 0 (biased zero = -32768)
					score = _mm_set1_epi16(-32768);

					// byte offset of this half inside a profile column
					disp_2 = bb1*SSE_INT16_VECTOR_LENGTH;

					for (k=0; k < nbb; k++){

						// calculate dim1
						disp_4 = k*block_size;
						dim1 = n[s]-disp_4;
						dim1 = (block_size < dim1 ? block_size : dim1);
						// calculate dim2
						dim2 = dim1 / DB_SEQ_LEN_MULT;

						// calculate a[i] displacement
						disp_1 = dim1*SSE_INT8_VECTOR_LENGTH;

						// init buffers
						#pragma unroll(SSE_UNROLL_COUNT)
						for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi16(-32768);
						#pragma unroll(SSE_UNROLL_COUNT)
						for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi16(-32768);
						auxLastCol = _mm_set1_epi16(-32768);

						// build score profile (same 8-bit layout as the first pass)
						for (i=0; i< dim1 ;i++ ) {
							// indexes
							b_values =  _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*SSE_INT8_VECTOR_LENGTH));
							// indexes >= 16
							aux1 = _mm_sub_epi8(b_values, v16);
							// indexes < 16
							aux2 = _mm_cmpgt_epi8(b_values,v15);
							aux3 = _mm_and_si128(aux2,vneg32);
							aux4 = _mm_add_epi8(b_values,aux3);
							ptr_scoreProfile1 = (__m128i*)(scoreProfile) + i;
							#pragma unroll
							for (j=0; j< SUBMAT_ROWS-1; j++) {
								tmp = (__m128i *) (submat + j*SUBMAT_COLS);
								auxBlosum[0] = _mm_load_si128(tmp);
								auxBlosum[1] = _mm_load_si128(tmp+1);
								aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4);
								aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1);
								aux7 = _mm_add_epi8(aux5,  aux6);
								_mm_store_si128(ptr_scoreProfile1+j*dim1, aux7);
							}
							_mm_store_si128(ptr_scoreProfile1+(SUBMAT_ROWS-1)*dim1,  vzero_epi8);
						}

						for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){

							// update row[0] with lastCol[i-1]
							row1[0] = lastCol[i];
							previous2 = lastCol[i+1];
							previous3 = lastCol[i+2];
							previous4 = lastCol[i+3];
							// calculate score profile displacement
							ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_2);
							ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_2);
							ptr_scoreProfile3 = (__m128i *)(scoreProfile+((int)(ptr_a[i+2]))*disp_1+disp_2);
							ptr_scoreProfile4 = (__m128i *)(scoreProfile+((int)(ptr_a[i+3]))*disp_1+disp_2);
							// store maxRow in auxiliars
							aux1 = maxRow[i];
							aux2 = maxRow[i+1];
							aux3 = maxRow[i+2];
							aux4 = maxRow[i+3];

							for (ii=0; ii<dim2 ; ii++) {

								#pragma unroll(DB_SEQ_LEN_MULT)
								for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT;  jj++, j++) {
									//calcuate the diagonal value (widen profile 8->16 bit)
									current1 =  _mm_adds_epi16(row1[j-1], _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile1+(j-1))));
									// calculate current1 max value
									current1 = _mm_max_epi16(current1, aux1);
									current1 = _mm_max_epi16(current1, maxCol[j]);
									//current1 = _mm_max_epi16(current1, vzero_epi16);
									// update maxRow and maxCol
									aux1 = _mm_subs_epi16(aux1, vextend_gap_epi16);
									maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16);
									aux0 = _mm_subs_epi16(current1, vopen_extend_gap_epi16);
									aux1 = _mm_max_epi16(aux1, aux0);
									maxCol[j] =  _mm_max_epi16(maxCol[j], aux0);
									// update max score
									score = _mm_max_epi16(score,current1);

									//calcuate the diagonal value
									current2 =  _mm_adds_epi16(previous2, _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile2+(j-1))));
									// update previous
									previous2 = current1;
									// calculate current2 max value
									current2 = _mm_max_epi16(current2, aux2);
									current2 = _mm_max_epi16(current2, maxCol[j]);
									//current2 = _mm_max_epi16(current2, vzero_epi16);
									// update maxRow and maxCol
									aux2 = _mm_subs_epi16(aux2, vextend_gap_epi16);
									maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16);
									aux0 = _mm_subs_epi16(current2, vopen_extend_gap_epi16);
									aux2 = _mm_max_epi16(aux2, aux0);
									maxCol[j] =  _mm_max_epi16(maxCol[j], aux0);
									// update max score
									score = _mm_max_epi16(score,current2);

									//calcuate the diagonal value
									current3 =  _mm_adds_epi16(previous3, _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile3+(j-1))));
									// update previous
									previous3 = current2;
									// calculate current3 max value
									current3 = _mm_max_epi16(current3, aux3);
									current3 = _mm_max_epi16(current3, maxCol[j]);
									//current3 = _mm_max_epi16(current3, vzero_epi16);
									// update maxRow and maxCol
									aux3 = _mm_subs_epi16(aux3, vextend_gap_epi16);
									maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16);
									aux0 = _mm_subs_epi16(current3, vopen_extend_gap_epi16);
									aux3 = _mm_max_epi16(aux3, aux0);
									maxCol[j] =  _mm_max_epi16(maxCol[j], aux0);
									// update max score
									score = _mm_max_epi16(score,current3);

									//calcuate the diagonal value
									current4 =  _mm_adds_epi16(previous4, _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile4+(j-1))));
									// update previous
									previous4 = current3;
									// calculate current4 max value
									current4 = _mm_max_epi16(current4, aux4);
									current4 = _mm_max_epi16(current4, maxCol[j]);
									//current4 = _mm_max_epi16(current4, vzero_epi16);
									// update maxRow and maxCol
									aux4 = _mm_subs_epi16(aux4, vextend_gap_epi16);
									maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16);
									aux0 = _mm_subs_epi16(current4, vopen_extend_gap_epi16);
									aux4 = _mm_max_epi16(aux4, aux0);
									maxCol[j] =  _mm_max_epi16(maxCol[j], aux0);
									// update row buffer
									row2[j] = current4;
									// update max score
									score = _mm_max_epi16(score,current4);
								}
							}
							// update maxRow
							maxRow[i] = aux1;
							maxRow[i+1] = aux2;
							maxRow[i+2] = aux3;
							maxRow[i+3] = aux4;
							// update lastCol
							lastCol[i] = auxLastCol;
							lastCol[i+1] = current1;
							lastCol[i+2] = current2;
							lastCol[i+3] = current3;
							auxLastCol = current4;
							// swap buffers
							tmp = row1;
							row1 = row2;
							row2 = tmp;
						}
					}

					// store max value (un-bias: widen 16->32 bit and add 32768)
					aux1 = _mm_add_epi32(_mm_cvtepi16_epi32(score),v32768);
					_mm_store_si128 (ptr_scores+bb1*2,aux1);
					aux1 = _mm_add_epi32(_mm_cvtepi16_epi32(_mm_srli_si128(score,8)),v32768);
					_mm_store_si128 (ptr_scores+bb1*2+1,aux1);

					// overflow detection (any lane saturated at +32767?)
					aux1 = _mm_cmpeq_epi16(score,v32767);
					overflow_flag =  _mm_test_all_zeros(aux1,v32767);

					// if overflow
					if (overflow_flag == 0){

						// detect if overflow occurred in low-half, high-half or both halves
						aux1 = _mm_cmpeq_epi16(_mm_slli_si128(score,8),v32767);
						bb2_start =  _mm_test_all_zeros(aux1,v32767);
						aux1 = _mm_cmpeq_epi16(_mm_srli_si128(score,8),v32767);
						bb2_end = 2 - _mm_test_all_zeros(aux1,v32767);

						// ------ 32-bit pass over saturated 4-lane quarters ---
						// recalculate using 32-bit signed integer precision
						for (bb2=bb2_start; bb2<bb2_end ; bb2++){

							// init buffers (no bias needed at 32-bit)
							#pragma unroll(SSE_UNROLL_COUNT)
							for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi32(0);
							#pragma unroll(SSE_UNROLL_COUNT)
							for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi32(0);

							// set score to 0
							score = _mm_set1_epi32(0);

							disp_3 = disp_2 + bb2*SSE_INT32_VECTOR_LENGTH;

							for (k=0; k < nbb; k++){

								// calculate dim1
								disp_4 = k*block_size;
								dim1 = n[s]-disp_4;
								dim1 = (block_size < dim1 ? block_size : dim1);
								// calculate dim2
								dim2 = dim1 / DB_SEQ_LEN_MULT;

								// calculate a[i] displacement
								disp_1 = dim1*SSE_INT8_VECTOR_LENGTH;

								// init buffers
								#pragma unroll(SSE_UNROLL_COUNT)
								for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi32(0);
								#pragma unroll(SSE_UNROLL_COUNT)
								for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi32(0);
								auxLastCol = _mm_set1_epi32(0);

								// build score profile
								for (i=0; i< dim1 ;i++ ) {
									// indexes
									b_values =  _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*SSE_INT8_VECTOR_LENGTH));
									// indexes >= 16
									aux1 = _mm_sub_epi8(b_values, v16);
									// indexes < 16
									aux2 = _mm_cmpgt_epi8(b_values,v15);
									aux3 = _mm_and_si128(aux2,vneg32);
									aux4 = _mm_add_epi8(b_values,aux3);
									ptr_scoreProfile1 = (__m128i*)(scoreProfile) + i;
									#pragma unroll
									for (j=0; j< SUBMAT_ROWS-1; j++) {
										tmp = (__m128i *) (submat + j*SUBMAT_COLS);
										auxBlosum[0] = _mm_load_si128(tmp);
										auxBlosum[1] = _mm_load_si128(tmp+1);
										aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4);
										aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1);
										aux7 = _mm_add_epi8(aux5,  aux6);
										_mm_store_si128(ptr_scoreProfile1+j*dim1, aux7);
									}
									_mm_store_si128(ptr_scoreProfile1+(SUBMAT_ROWS-1)*dim1,  vzero_epi8);
								}

								for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){

									// update row[0] with lastCol[i-1]
									row1[0] = lastCol[i];
									previous2 = lastCol[i+1];
									previous3 = lastCol[i+2];
									previous4 = lastCol[i+3];
									// calculate score profile displacement
									ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_3);
									ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_3);
									ptr_scoreProfile3 = (__m128i *)(scoreProfile+((int)(ptr_a[i+2]))*disp_1+disp_3);
									ptr_scoreProfile4 = (__m128i *)(scoreProfile+((int)(ptr_a[i+3]))*disp_1+disp_3);
									// store maxRow in auxiliars
									aux1 = maxRow[i];
									aux2 = maxRow[i+1];
									aux3 = maxRow[i+2];
									aux4 = maxRow[i+3];

									for (ii=0; ii<dim2 ; ii++) {

										#pragma unroll(DB_SEQ_LEN_MULT)
										for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT;  jj++, j++) {
											//calcuate the diagonal value (widen profile 8->32 bit)
											current1 =  _mm_add_epi32(row1[j-1], _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile1+(j-1))));
											// calculate current1 max value
											current1 = _mm_max_epi32(current1, aux1);
											current1 = _mm_max_epi32(current1, maxCol[j]);
											current1 = _mm_max_epi32(current1, vzero_epi32);
											// update maxRow and maxCol
											aux1 = _mm_sub_epi32(aux1, vextend_gap_epi32);
											maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32);
											aux0 = _mm_sub_epi32(current1, vopen_extend_gap_epi32);
											aux1 = _mm_max_epi32(aux1, aux0);
											maxCol[j] =  _mm_max_epi32(maxCol[j], aux0);
											// update max score
											score = _mm_max_epi32(score,current1);

											//calcuate the diagonal value
											current2 =  _mm_add_epi32(previous2, _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile2+(j-1))));
											// update previous
											previous2 = current1;
											// calculate current2 max value
											current2 = _mm_max_epi32(current2, aux2);
											current2 = _mm_max_epi32(current2, maxCol[j]);
											current2 = _mm_max_epi32(current2, vzero_epi32);
											// update maxRow and maxCol
											aux2 = _mm_sub_epi32(aux2, vextend_gap_epi32);
											maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32);
											aux0 = _mm_sub_epi32(current2, vopen_extend_gap_epi32);
											aux2 = _mm_max_epi32(aux2, aux0);
											maxCol[j] =  _mm_max_epi32(maxCol[j], aux0);
											// update max score
											score = _mm_max_epi32(score,current2);

											//calcuate the diagonal value
											current3 =  _mm_add_epi32(previous3, _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile3+(j-1))));
											// update previous
											previous3 = current2;
											// calculate current3 max value
											current3 = _mm_max_epi32(current3, aux3);
											current3 = _mm_max_epi32(current3, maxCol[j]);
											current3 = _mm_max_epi32(current3, vzero_epi32);
											// update maxRow and maxCol
											aux3 = _mm_sub_epi32(aux3, vextend_gap_epi32);
											maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32);
											aux0 = _mm_sub_epi32(current3, vopen_extend_gap_epi32);
											aux3 = _mm_max_epi32(aux3, aux0);
											maxCol[j] =  _mm_max_epi32(maxCol[j], aux0);
											// update max score
											score = _mm_max_epi32(score,current3);

											//calcuate the diagonal value
											current4 =  _mm_add_epi32(previous4, _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile4+(j-1))));
											// update previous
											previous4 = current3;
											// calculate current4 max value
											current4 = _mm_max_epi32(current4, aux4);
											current4 = _mm_max_epi32(current4, maxCol[j]);
											current4 = _mm_max_epi32(current4, vzero_epi32);
											// update maxRow and maxCol
											aux4 = _mm_sub_epi32(aux4, vextend_gap_epi32);
											maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32);
											aux0 = _mm_sub_epi32(current4, vopen_extend_gap_epi32);
											aux4 = _mm_max_epi32(aux4, aux0);
											maxCol[j] =  _mm_max_epi32(maxCol[j], aux0);
											// update row buffer
											row2[j] = current4;
											// update max score
											score = _mm_max_epi32(score,current4);
										}
									}
									// update maxRow
									maxRow[i] = aux1;
									maxRow[i+1] = aux2;
									maxRow[i+2] = aux3;
									maxRow[i+3] = aux4;
									// update lastCol
									lastCol[i] = auxLastCol;
									lastCol[i+1] = current1;
									lastCol[i+2] = current2;
									lastCol[i+3] = current3;
									auxLastCol = current4;
									// swap buffers
									tmp = row1;
									row1 = row2;
									row2 = tmp;
								}
							}
							// store max value (exact at 32-bit; no bias)
							_mm_store_si128 (ptr_scores+bb1*2+bb2,score);
						}
					}
				}
			}
		}

		_mm_free(row1);
		_mm_free(row2);
		_mm_free(maxCol);
		_mm_free(maxRow);
		_mm_free(lastCol);
		_mm_free(scoreProfile);
	}

	*workTime = dwalltime()-tick;
}
bucle-for.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Runs a parallel loop of n iterations (n taken from argv[1]) and prints
 * which OpenMP thread executes each iteration.
 *
 * Fix: the original placed a compound statement `{ ... }` between
 * `#pragma omp parallel for` and the loop. A combined worksharing-loop
 * construct must be immediately followed by the for statement itself,
 * so the original did not compile under -fopenmp.
 */
int main(int argc, char **argv)
{
    int n = 9;   /* default iteration count (overwritten below) */

    if (argc < 2) {
        fprintf(stderr,"\n[ERROR] - Falta no iteraciones \n");
        exit(-1);
    }

    n = atoi(argv[1]);

    /* The loop variable is declared in the for-init so it is implicitly
       private to each thread, as the OpenMP loop construct requires. */
    #pragma omp parallel for
    for (int i = 0; i < n; i++)
        printf("thread %d ejecuta la iteración %d del bucle\n",
               omp_get_thread_num(), i);

    return 0;
}
test.c
#include <stdio.h> #include <omp.h> #include "../utilities/check.h" #include "../utilities/utilities.h" #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) int main(void){ check_offloading(); int fail; double A[N], B[N], C[N], D[N], E[N]; INIT(); #if 0 // // Test: Execute on host // #pragma omp target if (target: C[0] == 0) { #pragma omp parallel for schedule(static,1) for (int i = 0; i < 992; i++) A[i] = C[i] + D[i] + omp_is_initial_device(); } fail = 0; VERIFY(0, N, A[i], i+2); if (fail) { printf ("Test1: Failed\n"); } else { printf ("Test1: Succeeded\n"); } #endif // // Test: Execute on device // #pragma omp target device(3) if (target: C[0] == 1) { #pragma omp parallel for schedule(static,1) for (int i = 0; i < 992; i++) A[i] = C[i] + D[i] + /*omp_is_initial_device()=*/1; // We cannot use omp_is_initial_device() directly because this is tested for // the host too. } // CHECK: Succeeded fail = 0; VERIFY(0, N, A[i], i+2); if (fail) { printf ("Test2: Failed\n"); } else { printf ("Test2: Succeeded\n"); } // // Test: Printf on device // #pragma omp target { printf ("Master %d\n", omp_get_thread_num()); int TT[2] = {0,0}; #pragma omp parallel num_threads(2) { if (omp_get_num_threads() == 1) { TT[omp_get_thread_num()]++; TT[omp_get_thread_num() + 1]++; } else TT[omp_get_thread_num()]++; } printf ("Parallel %d:%f\n", TT[0], D[0]); printf ("Parallel %d:%f\n", TT[1], D[1]); } return 0; }
radix_sort.h
#pragma once

// #include <glm/glm.hpp>
// #include <glm/gtc/type_ptr.hpp>
#include <vector>
#include <cstdint>
#include <omp.h>

// NOTE(review): `ceil` (used below) and `memcpy` (RadixSorter::sort) rely on
// headers pulled in transitively — confirm <cmath>/<cstring> reach this TU.
typedef uint32_t uint;

// TUintVec4 is assumed to be a glm::uvec4-like type: 4 uint lanes, indexable
// with [] and addable component-wise — TODO confirm against instantiations.
// Lane d of each counter vector holds the statistics for radix digit-pass d
// (byte d of the key).

// Count occurrences of byte `nthDigit` for `num` keys starting at `start`,
// accumulating into the 256-slot counter block beginning at `digitOffset`.
template <typename T, typename TUintVec4>
void radixSortPartCountDigitsNth(
    int start, int num, int nthDigit, int digitOffset,
    const std::vector<T>& values,
    std::vector<TUintVec4>& digitCounts
)
{
    for (int i = 0; i < num; ++i)
    {
        T val = values[start + i];
        uint8_t digit = (val >> (nthDigit * 8)) & 0xff;
        ++digitCounts[digitOffset + digit][nthDigit];
    }
}

// Count occurrences of all four key bytes in one pass; lane k of each
// counter receives the histogram for byte k.
template <typename T, typename TUintVec4>
void radixSortPartCountDigits(
    int start, int num, int digitOffset,
    const std::vector<T>& values,
    std::vector<TUintVec4>& digitCounts
)
{
    for (int i = 0; i < num; ++i)
    {
        T val = values[start + i];
        uint8_t digit0 = val & 0xff;
        uint8_t digit1 = (val >> 8) & 0xff;
        uint8_t digit2 = (val >> 16) & 0xff;
        uint8_t digit3 = (val >> 24) & 0xff;
        ++digitCounts[digitOffset + digit0][0];
        ++digitCounts[digitOffset + digit1][1];
        ++digitCounts[digitOffset + digit2][2];
        ++digitCounts[digitOffset + digit3][3];
    }
}

// Stable counting-sort scatter of one group's [start, start+num) slice by
// byte `nthDigit`, writing into sortedIndices/sortedValues at positions
// derived from this group's digitStarts; digitPointers tracks how many
// items of each digit have been placed so far (must be zero on entry).
template <typename T, typename TUintVec4>
void radixSortPartByNthDigit(
    //const std::vector<T>& values,
    int start, int num, int nthDigit, int digitOffset,
    const std::vector<TUintVec4>& digitStarts,
    std::vector<TUintVec4>& digitPointers,
    const std::vector<uint>& inIndices,
    const std::vector<T>& inValues,
    std::vector<uint>& sortedIndices,
    std::vector<T>& sortedValues
)
{
    // sort by nth digit
    for (int i = 0; i < num; ++i)
    {
        uint idx = inIndices[start + i];
        T val = inValues[start + i];
        uint8_t digit = (val >> (nthDigit * 8)) & 0xff;
        uint digitStart = digitStarts[digitOffset + digit][nthDigit];
        uint& digitPointer = digitPointers[digitOffset + digit][nthDigit];
        uint targetIndex = start + digitStart + digitPointer;
        ++digitPointer;
        sortedIndices[targetIndex] = idx;
        sortedValues[targetIndex] = val;
    }
}

// Gather all items with key-byte value `digit` out of every group's locally
// sorted slice and concatenate them (group order preserved) into the global
// output block that starts at digitStarts[digit][nthDigit]. Called once per
// digit value, so concurrent calls write disjoint output ranges.
template <typename T, typename TUintVec4>
void radixSortRedistribute(
    int numGroups, uint group_size, int nthDigit, int digit,
    const std::vector<TUintVec4>& digitStarts,
    const std::vector<TUintVec4>& digitCounts,
    const std::vector<uint>& inIndices,
    const std::vector<T>& inValues,
    std::vector<uint>& outIndices,
    std::vector<T>& outValues
)
{
    // start of digit block across all groups
    uint start = digitStarts[digit][nthDigit];
    // collect items with digit out of each group
    for (int i = 0; i < numGroups; ++i)
    {
        uint digitOffset = (i + 1) * 256;
        uint digitCountInGroup = digitCounts[digitOffset + digit][nthDigit];
        if (digitCountInGroup == 0)
            continue;
        uint digitStartInGroup = digitStarts[digitOffset + digit][nthDigit];
        uint groupStart = i * group_size;
        for (int j = 0; j < digitCountInGroup; ++j)
        {
            uint target = start + j;
            outIndices[target] = inIndices[groupStart + digitStartInGroup + j];
            outValues[target] = inValues[groupStart + digitStartInGroup + j];
        }
        start += digitCountInGroup;
    }
}

// LSD radix sort (4 byte-passes) parallelized over fixed-size groups:
// each pass locally counting-sorts every group in parallel, then merges the
// per-group digit runs into a globally ordered array (redistribution).
// Counter layout: slots [0,256) hold totals over all groups; slots
// [(g+1)*256, (g+2)*256) hold group g's histogram.
// Produces sortedValues (ascending) and sortedIndices (original positions);
// scratch vectors are resized here, sortedIndices/sortedValues are assumed
// pre-sized to values.size() by the caller — TODO confirm.
template <typename T, typename TUintVec4>
void radixWithRedistribution(
    int group_size,
    const std::vector<T>& values,
    //const T* values,
    //int numItems,
    std::vector<uint>& sortedIndices,
    std::vector<T>& sortedValues,
    std::vector<uint>& tmpSortedIndices,
    std::vector<T>& tmpSortedValues,
    std::vector<TUintVec4>& digitCounts,
    std::vector<TUintVec4>& digitStarts,
    std::vector<TUintVec4>& digitPointers
)
{
    int numItems = values.size();
    int numGroups = (int)(ceil((float)numItems / (float)group_size));
    if (numGroups <= 0)
    {
        numGroups = 1;
        group_size = numItems;
    }
    // digit counters for radix sort of each workgroup, and total counts in first block of 256
    digitCounts.resize(256 * (numGroups + 1));
    digitStarts.resize(256 * (numGroups + 1));
    digitPointers.resize(256 * (numGroups + 1));
    // NOTE(review): `int k` vs size_t digitPointers.size() — signed/unsigned
    // comparison; harmless at these sizes but worth cleaning up.
#pragma omp parallel for
    for (int k = 0; k < digitPointers.size(); ++k)
    {
        digitCounts[k] = TUintVec4(0, 0, 0, 0);
        digitPointers[k] = TUintVec4(0, 0, 0, 0);
    }
    // identity permutation + working copy of the keys
#pragma omp parallel for
    for (int i = 0; i < numItems; ++i)
    {
        sortedIndices[i] = i;
        sortedValues[i] = values[i];
    }
    // count digit occurences for each group
#pragma omp parallel for
    for (int i = 0; i < numGroups; ++i)
    {
        int start = group_size * i;
        int end = start + group_size;
        if (end > numItems)
            end = numItems;
        int num = end - start;
        radixSortPartCountDigits(start, num, (i + 1) * 256, sortedValues, digitCounts);
    }
    // sum digit occurences of each group into first block of 256
    // (totals are permutation-invariant, so they stay valid for every pass)
#pragma omp parallel for
    for (int k = 0; k < 256; ++k)
    {
        digitCounts[k] = TUintVec4(0, 0, 0, 0);
        for (int i = 0; i < numGroups; ++i)
        {
            digitCounts[k] += digitCounts[(i + 1) * 256 + k];
        }
    }
    // compute starts of each digit block for each group and for total
    // (exclusive prefix sum over the 256 digit bins)
#pragma omp parallel for
    for (int i = -1; i < numGroups; ++i) // start from -1, to also include the first block containing total values
    {
        uint digitOffset = (i + 1) * 256;
        digitStarts[digitOffset] = TUintVec4(0, 0, 0, 0);
        for (int k = 1; k < 256; ++k)
        {
            digitStarts[digitOffset + k] = digitStarts[digitOffset + k - 1] + digitCounts[digitOffset + k - 1];
        }
    }
    // sort by first digit in each group
    for (int nthDigit = 0; nthDigit < 4; ++nthDigit)
    {
        // all items have digit==0 , skip sort of this nthDigit, cause there is nothing to do
        if (digitCounts[0][nthDigit] == numItems)
            continue;
#pragma omp parallel for
        for (int i = 0; i < numGroups; ++i)
        {
            int start = group_size * i;
            int end = start + group_size;
            if (end > numItems)
                end = numItems;
            int num = end - start;
            radixSortPartByNthDigit(
                start, num, nthDigit, (i + 1) * 256,
                digitStarts, digitPointers,
                sortedIndices, sortedValues,
                tmpSortedIndices, tmpSortedValues
            );
        }
        // redistribute items across work groups
        // by collecting items for each digit
        // (each k writes a disjoint output range, so this parallelizes safely)
#pragma omp parallel for
        for (int k = 0; k < 256; ++k)
        {
            radixSortRedistribute(
                numGroups, group_size, nthDigit, k,
                digitStarts, digitCounts,
                tmpSortedIndices, tmpSortedValues,
                sortedIndices, sortedValues
            );
        }
        if (nthDigit == 3)
            break;
        // recount nth digit occurences for each group
        // (per-group histograms of higher digits change after redistribution;
        // global totals do not, hence only group blocks are recounted)
#pragma omp parallel for
        for (int i = 0; i < numGroups; ++i)
        {
            for (int k = 0; k < 256; ++k)
            {
                digitCounts[(i + 1) * 256 + k][nthDigit + 1] = 0;
            }
            int start = group_size * i;
            int end = start + group_size;
            if (end > numItems)
                end = numItems;
            int num = end - start;
            radixSortPartCountDigitsNth(start, num, nthDigit + 1, (i + 1) * 256, sortedValues, digitCounts);
        }
        // recompute starts of nth digit block for each group
#pragma omp parallel for
        for (int i = 0; i < numGroups; ++i) // start from 0, to NOT include the first block containing total values
        {
            uint digitOffset = (i + 1) * 256;
            digitStarts[digitOffset][nthDigit + 1] = 0;
            for (int k = 1; k < 256; ++k)
            {
                digitStarts[digitOffset + k][nthDigit + 1] = digitStarts[digitOffset + k - 1][nthDigit + 1] + digitCounts[digitOffset + k - 1][nthDigit + 1];
            }
        }
    }
}

// Convenience wrapper owning all scratch buffers for repeated sorts.
// After sort(): sortedValues holds the keys ascending, sortedIndices[i]
// is the original position of sortedValues[i].
template <typename T, typename TUintVec4>
class RadixSorter
{
public:
    RadixSorter() : group_size(1024) {}
    RadixSorter(int group_size) : group_size(group_size) {}
    uint group_size;
    std::vector<T> values;
    std::vector<uint> sortedIndices;
    std::vector<T> sortedValues;
    // Raw-pointer overload: copies the input, then delegates.
    void sort(const T* valuesPtr, int num)
    {
        values.resize(num);
        memcpy(values.data(), valuesPtr, sizeof(T)*num);
        sort(values);
    }
    void sort(const std::vector<T>& values)
    {
        int num = values.size();
        sortedIndices.resize(num);
        sortedValues.resize(num);
        tmpSortedIndices.resize(num);
        tmpSortedValues.resize(num);
        radixWithRedistribution<T,TUintVec4>(
            group_size, values,
            sortedIndices, sortedValues,
            tmpSortedIndices, tmpSortedValues,
            digitCounts, digitStarts, digitPointers
        );
    }
protected:
    std::vector<uint> tmpSortedIndices;
    std::vector<T> tmpSortedValues;
    std::vector<TUintVec4> digitCounts;
    std::vector<TUintVec4> digitStarts;
    std::vector<TUintVec4> digitPointers;
};

//template <typename T>
//void radixWithRedistribution(
//	int group_size,
//	const std::vector<T>& values,
//	std::vector<uint>& sortedIndices,
//	std::vector<T>& sortedValues,
//	std::vector<uint>& tmpSortedIndices,
//	std::vector<T>& tmpSortedValues,
//	std::vector<TUintVec4>& digitCounts,
//	std::vector<TUintVec4>& digitStarts,
//	std::vector<TUintVec4>& digitPointers
//)
//{
//	radixWithRedistribution(
//		group_size,
//		values.data(),
//		values.size(),
//		sortedIndices,
//		sortedValues,
//		tmpSortedIndices,
//		tmpSortedValues,
//		digitCounts,
//		digitStarts,
//		digitPointers
//	);
//}

// Fully sequential 4-pass LSD radix sort of one slice [start, start+num).
// Ping-pongs between (buf, valBuf2) and (sorted, valBuf); after the 4th
// pass `sorted` holds ORIGINAL indices in ascending-key order for the slice
// (the first pass stores start+idx, i.e. global indices).
template <typename T, typename TUintVec4>
void radixSortPart(
	const std::vector<T>& values,
	int start, int num,
	std::vector<uint>& sorted,
	std::vector<uint>& buf,
	std::vector<T>& valBuf,
	std::vector<T>& valBuf2
)
{
	TUintVec4 digitCounts[256];
	TUintVec4 digitStarts[256];
	TUintVec4 digitPointers[256];
	for (int i = 0; i < 256; ++i)
	{
		digitCounts[i] = TUintVec4(0, 0, 0, 0);
		digitPointers[i] = TUintVec4(0, 0, 0, 0);
	}
	// histogram all four key bytes in one pass (lanes x..w = bytes 0..3)
	for (int i = 0; i < num; ++i)
	{
		T val = values[start + i];
		uint8_t digit0 = val & 0xff;
		uint8_t digit1 = (val >> 8) & 0xff;
		uint8_t digit2 = (val >> 16) & 0xff;
		uint8_t digit3 = (val >> 24) & 0xff;
		++digitCounts[digit0].x;
		++digitCounts[digit1].y;
		++digitCounts[digit2].z;
		++digitCounts[digit3].w;
	}
	// exclusive prefix sums -> start of each digit bin, per pass
	digitStarts[0] = TUintVec4(0, 0, 0, 0);
	for (int i = 1; i < 256; ++i)
	{
		digitStarts[i] = digitStarts[i - 1] + digitCounts[i - 1];
	}
	// sort by 1st digit (least significant)
	for (int idx = 0; idx < num; ++idx)
	{
		T val = values[start + idx];
		uint8_t digit0 = val & 0xff;
		uint targetIndex = start + digitStarts[digit0].x + digitPointers[digit0].x;
		++digitPointers[digit0].x;
		buf[targetIndex] = start + idx;
		valBuf2[targetIndex] = val;
	}
	// sort by 2nd digit
	for (int i = 0; i < num; ++i)
	{
		int idx = buf[start + i];
		T val = valBuf2[start + i];
		uint8_t digit1 = (val >> 8) & 0xff;
		uint targetIndex = start + digitStarts[digit1].y + digitPointers[digit1].y;
		++digitPointers[digit1].y;
		sorted[targetIndex] = idx;
		valBuf[targetIndex] = val;
	}
	// sort by 3rd digit
	for (int i = 0; i < num; ++i)
	{
		int idx = sorted[start + i];
		T val = valBuf[start + i];
		uint8_t digit2 = (val >> 16) & 0xff;
		uint targetIndex = start + digitStarts[digit2].z + digitPointers[digit2].z;
		++digitPointers[digit2].z;
		buf[targetIndex] = idx;
		valBuf2[targetIndex] = val;
	}
	// sort by 4th digit (most significant)
	for (int i = 0; i < num; ++i)
	{
		int idx = buf[start + i];
		T val = valBuf2[start + i];
		uint8_t digit3 = (val >> 24) & 0xff;
		uint targetIndex = start + digitStarts[digit3].w + digitPointers[digit3].w;
		++digitPointers[digit3].w;
		sorted[targetIndex] = idx;
		valBuf[targetIndex] = val;
	}
}

// Sorts each group_size-sized slice independently (in parallel); the result
// is NOT globally sorted — each slice is sorted only within itself.
template <typename T, typename TUintVec4>
void radixSortInParts(
	int group_size,
	const std::vector<T>& values,
	std::vector<uint>& sorted,
	std::vector<uint>& buf,
	std::vector<T>& valBuf,
	std::vector<T>& valBuf2
)
{
	int numItems = values.size();
	sorted.resize(numItems);
	buf.resize(numItems);
	valBuf.resize(numItems);
	valBuf2.resize(numItems);
	int numGroups = (int)(ceil((float)numItems / (float)group_size));
	if (numGroups <= 0)
	{
		numGroups = 1;
		group_size = numItems;
	}
	// omp_set_num_threads(64);
	//omp_set_num_threads(512);
#pragma omp parallel for
	for (int i = 0; i < numGroups; ++i)
	{
		int start = group_size * i;
		int end = start + group_size;
		if (end > numItems)
			end = numItems;
		int num = end - start;
		radixSortPart<T,TUintVec4>(values, start, num, sorted, buf, valBuf, valBuf2);
	}
}

// Sequential whole-array 4-pass LSD radix sort; on return `sorted[i]` is
// the original index of the i-th smallest key (valBuf holds the keys).
template <typename T, typename TUintVec4>
void radixSort(
	const std::vector<T>& values,
	std::vector<uint>& sorted,
	std::vector<uint>& buf,
	std::vector<T>& valBuf,
	std::vector<T>& valBuf2
)
{
	TUintVec4 digitCounts[256];
	TUintVec4 digitStarts[256];
	TUintVec4 digitPointers[256];
	for (int i = 0; i < 256; ++i)
	{
		digitCounts[i] = TUintVec4(0, 0, 0, 0);
		digitPointers[i] = TUintVec4(0, 0, 0, 0);
	}
	int num = values.size();
	sorted.resize(num);
	buf.resize(num);
	valBuf.resize(num);
	valBuf2.resize(num);
	for (int i = 0; i < num; ++i)
	{
		T val = values[i];
		uint8_t digit0 = val & 0xff;
		uint8_t digit1 = (val >> 8) & 0xff;
		uint8_t digit2 = (val >> 16) & 0xff;
		uint8_t digit3 = (val >> 24) & 0xff;
		++digitCounts[digit0].x;
		++digitCounts[digit1].y;
		++digitCounts[digit2].z;
		++digitCounts[digit3].w;
	}
	digitStarts[0] = TUintVec4(0, 0, 0, 0);
	for (int i = 1; i < 256; ++i)
	{
		digitStarts[i] = digitStarts[i - 1] + digitCounts[i - 1];
	}
	// sort by 1st digit (least significant)
	for (int idx = 0; idx < num; ++idx)
	{
		T val = values[idx];
		uint8_t digit0 = val & 0xff;
		uint targetIndex = digitStarts[digit0].x + digitPointers[digit0].x;
		++digitPointers[digit0].x;
		buf[targetIndex] = idx;
		valBuf2[targetIndex] = val;
		//targets[idx] = targetIndex;
		//buf[idx] = targetIndex;
		//valBuf2[idx] = val;
	}
	// sort by 2nd digit
	for (int i = 0; i < num; ++i)
	{
		int idx = buf[i];
		T val = valBuf2[i];
		//T val = values[idx];
		//T val = values[i];
		uint8_t digit1 = (val >> 8) & 0xff;
		uint targetIndex = digitStarts[digit1].y + digitPointers[digit1].y;
		++digitPointers[digit1].y;
		//sorted[idx] = targetIndex;
		sorted[targetIndex] = idx;
		valBuf[targetIndex] = val;
		//targets[i] = targetIndex;
		//sorted[i] = targetIndex;
		//valBuf[i] = val;
	}
	// sort by 3rd digit
	for (int i = 0; i < num; ++i)
	{
		int idx = sorted[i];
		T val = valBuf[i];
		//T val = values[i];
		//T val = values[idx];
		uint8_t digit2 = (val >> 16) & 0xff;
		uint targetIndex = digitStarts[digit2].z + digitPointers[digit2].z;
		++digitPointers[digit2].z;
		buf[targetIndex] = idx;
		valBuf2[targetIndex] = val;
		//targets[i] = targetIndex;
		//buf[i] = targetIndex;
		//valBuf2[i] = val;
	}
	// sort by 4th digit (most significant)
	for (int i = 0; i < num; ++i)
	{
		int idx = buf[i];
		T val = valBuf2[i];
		//T val = values[i];
		//T val = values[idx];
		uint8_t digit3 = (val >> 24) & 0xff;
		uint targetIndex = digitStarts[digit3].w + digitPointers[digit3].w;
		++digitPointers[digit3].w;
		//sorted[targetIndex] = val;
		sorted[targetIndex] = idx;
		valBuf[targetIndex] = val;
		//targets[i] = targetIndex;
		//sorted[i] = targetIndex;
		//valBuf[i] = val;
	}
	//for (int i = 0; i < num; ++i)
	//sorted[i] = values[i];
}
simd-7.c
/* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */

/* GCC testsuite case for `#pragma omp simd` clause handling:
   aligned(), linear() with a step expression, reduction(+:),
   and lastprivate() on struct variables. The dg-* comments above
   are harness directives and must stay byte-exact. */

extern void abort ();
int a[1024] __attribute__((aligned (32))) = { 1 };
int b[1024] __attribute__((aligned (32))) = { 1 };
int k, m;
struct U { int u; };
struct V { int v; };

/* Variant with an explicit linear(i) clause on the loop variable. */
__attribute__((noinline, noclone)) int
foo (int *p)
{
  int i, s = 0;
  struct U u;
  struct V v;
  #pragma omp simd aligned(a, p : 32) linear(k: m + 1) \
		   linear(i) reduction(+:s) lastprivate(u, v)
  for (i = 0; i < 1024; i++)
    {
      int *q = &i;
      a[i] *= p[i];
      u.u = p[i] + k;
      k += m + 1;
      v.v = p[i] + k;
      s += p[i] + k;
    }
  /* lastprivate must yield the values from the final (i == 1023)
     iteration; k advances by m+1 == 3 each iteration from 4.  */
  if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024 || i != 1024)
    abort ();
  return s;
}

/* Same loop but without linear(i) — i is handled implicitly. */
__attribute__((noinline, noclone)) int
bar (int *p)
{
  int i, s = 0;
  struct U u;
  struct V v;
  #pragma omp simd aligned(a, p : 32) linear(k: m + 1) \
		   reduction(+:s) lastprivate(u, v)
  for (i = 0; i < 1024; i++)
    {
      int *q = &i;
      a[i] *= p[i];
      u.u = p[i] + k;
      k += m + 1;
      v.v = p[i] + k;
      s += p[i] + k;
    }
  if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024 || i != 1024)
    abort ();
  return s;
}

int
main ()
{
#if __SIZEOF_INT__ >= 4
  int i;
  k = 4;
  m = 2;
  for (i = 0; i < 1024; i++)
    {
      a[i] = i - 512;
      b[i] = (i - 51) % 39;
    }
  int s = foo (b);
  for (i = 0; i < 1024; i++)
    {
      if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i])
	abort ();
    }
  if (k != 4 + 3 * 1024 || s != 1596127)
    abort ();
  /* Re-initialize and run the second variant with identical expectations.  */
  k = 4;
  m = 2;
  for (i = 0; i < 1024; i++)
    {
      a[i] = i - 512;
      b[i] = (i - 51) % 39;
    }
  s = bar (b);
  for (i = 0; i < 1024; i++)
    {
      if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i])
	abort ();
    }
  if (k != 4 + 3 * 1024 || s != 1596127)
    abort ();
#endif
  return 0;
}
GMS_cpu_perf_time_series_analysis.h
#ifndef __GMS_CPU_PERF_TIME_SERIES_ANALYSIS_H__ #define __GMS_CPU_PERF_TIME_SERIES_ANALYSIS_H__ #include <cstdint> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <algorithm> // std::sort #include "GMS_config.h" #include "Timsac_iface.h" #include "GMS_descriptive_statistics.hpp" #include "GMS_convert_numeric_data_types.hpp" #if !defined(DESCRIPTIVE_STATISTICS_DATA) #define DESCRIPTIVE_STATISTICS_DATA \ float __restrict * a = NULL; \ float __restrict * df32 = NULL; \ float w = 0.0f; \ float pw = 0.0f; \ int32_t ifault = -1; \ float srsd = 0.0f; \ float svar = 0.0f; \ float skew = 0.0f; \ float kurt = 0.0f; \ float autocor = 0.0f; \ float xmid = 0.0f; \ float xmean = 0.0f; \ float xmidm = 0.0f; \ float xmed = 0.0f; \ float smin = 0.0f; \ float smax = 0.0f; \ float xrange = 0.0f; \ float xsd = 0.0f; \ float xrelsd = 0.0f; \ float xvar = 0.0f; /* Apply Time-Series analysis (Timsac) subroutine "CANARM". The data itself is invariant from the point of view of specific subroutine i.e. "CANARM". Attempt to calculate the descritpive statistics if result of Wilk-Shapiro normality test allows it. */ __attribute__((hot)) __attribute__((aligned(32))) template<int32_t len, int32_t lagh> void cpu_perf_time_series_canarm(const double * __restrict __attribute__((aligned(64))) data, const char * __restrict fname, const char * __restrict data_type, const bool use_omp) { static_assert(len <= 100000, "Input data length can not exceed -- **100000** elements!!"); FILE * fptr = fopen(fname,"a+"); if(__builtin_expect(NULL==fptr,0)) { printf("File open error: %s\n",fname); std::exit(EXIT_FAILURE); } //const int32_t lagh = (int32_t)(std::sqrtf((int32_t)len)); const int32_t len2 = len/2; // shapiro-wilk 'a' array length. 
const std::size_t lag2len = (std::size_t)(lagh*lagh); const std::size_t lag3len = (std::size_t)lag2len*len; constexpr float w_limit = 0.05f; __attribute__((aligned(64))) double acor[lagh] = {}; __attribute__((aligned(64))) double acov[lagh] = {}; __attribute__((aligned(64))) double xarcoef[lagh] = {}; __attribute__((aligned(64))) double xv[lagh] = {}; __attribute__((aligned(64))) double xaic[lagh] = {}; __attribute__((aligned(64))) double xparcor[lagh] = {}; __attribute__((aligned(64))) double xdicm[lagh] = {}; __attribute__((aligned(64))) double xb[lagh] = {}; __attribute__((aligned(64))) double xa[lagh] = {}; __attribute__((aligned(64))) int32_t xm1[lagh] = {}; __attribute__((aligned(64))) int32_t xm2[lagh] = {}; __attribute__((aligned(64))) int32_t xpo[lagh] = {}; double __restrict * xw = NULL; double __restrict * xz = NULL; double __restrict * xRs = NULL; double __restrict * xchi = NULL; int32_t __restrict * xndt = NULL; double __restrict * xdic = NULL; double xoaic = 0.0; double xmean = 0.0; int32_t xmo = 0; int32_t xnc = 0; int32_t xk = 0; int32_t xl = 0; DESCRIPTIVE_STATISTICS_DATA const bool init = false; // swilk init argument. // OpenMP multithreaded calls to _mm_malloc (using parallel sections) // Multithreaded allocation for large dynamic arrays. 
if(use_omp) { #pragma omp parallel sections { #pragma section { xw = reinterpret_cast<double*>(_mm_malloc(lag3len*sizeof(double),64)); } #pragma section { xz = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64)); } #pragma section { xRs = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64)); } #pragma section { xchi = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64)); } #pragma section { xndt = reinterpret_cast<int32_t*>(_mm_malloc(lag2len*sizeof(int32_t),64)); } #pragma section { xdic = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64)); } #pragma section { a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64)); } #pragma section { df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64)); } } // Single thread checks the returned pointers!! const bool isnull = (NULL==a) || (NULL==xdic) || (NULL==xndt) || (NULL==xchi) || (NULL==xRs) || (NULL==xz) || (NULL==xw) || (NULL==df32); if(__builtin_expect(isnull,0)) {MALLOC_FAILED} } else { xw = reinterpret_cast<double*>(_mm_malloc(lag3len*sizeof(double),64)); if(__builtin_except(NULL==xw,0)) {MALLOC_FAILED} xz = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64)); if(__builtin_except(NULL==xz,0)) {MALLOC_FAILED} xRs = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64)); if(__builtin_except(NULL==xRs,0)) {MALLOC_FAILED} xchi = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64)); if(__builtin_except(NULL==xchi,0)) {MALLOC_FAILED} xndt = reinterpret_cast<int32_t*>(_mm_malloc(lag2len*sizeof(int32_t),64)); if(__builtin_except(NULL==xndt,0)) {MALLOC_FAILED} xdic = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64)); if(__builtin_except(NULL==xdic,0)) {MALLOC_FAILED} a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64)); if(__builtin_except(NULL==a,0)) {MALLOC_FAILED} df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64)); 
if(__builtin_except(NULL==df32,0)) {MALLOC_FAILED} } autcorf_(&data[0],&len,&acov[0],&acor[0],&lagh,&xmean); canarmf_(&len,&lagh,&acov[0],&xarcoef[0],&lagh,&xv[0],&xaic[0],&xoaic, &xmo,&xparcor[0],&xnc,&xm1[0],&xm2[0],&xw[0],&xz[0],&xRs[0], &xchi[0],&xndt[0],&xdic[0],&xdicm[0],&xpo[0],&xk,&xb[0],&xl, &xa[0],&lagh,&lagh); fprintf(fptr,"Data type: %s\n",data_type); fprintf(fptr,"mo=%.16f, oaic=%.16f, nc=%.16f, k=%.16f, l=%.16f\n",xmo,xoaic,xnc,xk,xl); fprintf(fptr, "arcoef, v, aic, parcor, dicm, b, a, m1, m2, po\n"); for(int32_t i = 0; i != lagh; ++i) {fprintf(fptr,"%.16f %.16f %.16f %.16f %.16f %.16f %.16f %d %d %.16f\n", xarcoef[i],xv[i],xaic[i],xparcor[i],xdicm[i],xb[i],xa[i],xm1[i],xm2[i],xpo[i]);} fprintf(fptr,"w\n"); for(int32_t i = 0; i != lag3len; ++i) {fprintf(fptr,"%.16f\n",xw[i]);} fprintf(fptr, "z, Rs, chi, ndt, dic\n"); for(int32_t i = 0; i != lag2len; ++i) {fprintf(fptr, " %.16f %.16f %.16f %d %.16f\n", xz[i],xRs[i],xchi[i],xndt[i],xdic[i]);} fprintf(fptr, "End of CANARMF results dump\n"); // Sort a samples arrays in ascending order //std::sort(data,data+len); cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len); printf("Calling Shapiro-Wilk normality test subroutine!!\n"); std::sort(df32,df32+len); swilk(init,&df32[0],len,len,n2,&a[0],w,pw,ifault); if(ifault!=0) printf("swilk ifault value is: %d\n",ifault); fprintf(fptr,"Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",w,pw); if(pw<w_limit) fprintf(fptr,"Warning!! 
-- 'pw' is less than normality limit -- Data is not normally distributed!!\n"); if(pw>w_limit) { fprintf(fptr,"Descriptive Statistics calculations!!\n"); fprintf(fptr,"====================================================\n"); srsd = relsd(&df32[0],len); fprintf(fptr,"Sample Relative Standard Deviation: %.9f\n",srsd); svar = var(&df32[0],len); fprintf(fptr,"Sample Variance: %.9f\n",svar); skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0); fprintf(fptr,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt); autocor = autoco(&df32[0],len); fprintf(fptr,"Autocorrelation: %.9f\n",autocor); loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed); fprintf(fptr,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n", xmid,xmean,xmidm,xmed); smin = sample_min(&df32[0],len); fprintf(fptr,"Sample Min: %.9f\n",smin); smax = sample_max(&df32[0],len); fprintf(fptr,"Sample Max: %.9f\n",smax); scale(&df32[0],len,xrange,xsd,xrelsd,xvar); fprintf(fptr,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n", xrange,xsd,xrelsd,xvar); } fclose(fptr); _mm_free(df32); _mm_free(a); _mm_free(xdic); _mm_free(xndt); _mm_free(xchi); _mm_free(xRs); _mm_free(xz); _mm_free(xw); } /* Apply Time-Series analysis (Timsac) subroutine "MULCOR". The data itself is invariant from the point of view of specific subroutine i.e. "MULCOR". No descriptive statistics computations for this function. 
*/ #include <string> __attribute__((hot)) __attribute__((aligned(32))) template<int32_t ndim, int32_t ldim, int32_t lagh> void cpu_perf_time_series_mulcor(const double * __restrict __attribute__((aligned(64))) mvdata, //multivariable data const char * __restrict fname, const std::string * __restrict data_types){ static_assert(ndim <= 11, "Number of dimensions can not exceed 11!!"); static_assert(ldim <= 100000, "Number of elements per dimension can not exceed 100000!!"); FILE * fp = fopen(fname,"a+"); if(__builtin_expect(NULL==fp,0)) { printf("File open error: %s\n",fname); std::exit(EXIT_FAILURE); } //const int32_t lagh = (int32_t)(2.0f*std::sqrt((float)ldim)); const int32_t totlen = ndim*ldim; const std::size_t mvd_len = (std::size_t)(lagh*ndim*ndim); __attribute__((aligned(64))) double xmean[ndim+6]; double * __restrict xcov = NULL; double * __restrict xcor = NULL; xcov = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); if(__builtin_expect(NULL==xcov,0)) {MALLOC_FAILED} xcor = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); if(__builtin_expect(NULL==xcor,0)) {MALLOC_FAILED} // Call TIMSAC MULCORF subroutine mulcorf_(&mvdata[0],&totlen,&ndim,&lagh,&xmean[0],&xcov[0],&xcor[0]); for(int32_t i = 0; i != ndim; ++i) {fprintf(fp,"Data types: %s\n",data_types[i].c_str()); fprintf(fp,"Multivariate mean\n"); for(int32_t i = 0; i != n_dim; ++i) { fprintf(fp,"%.16f\n",xmean[i]);} fprintf(fp1,"Multivariate Correlation and Covariance\n"); for(int32_t i = 0; i != lagh*n_dim*n_dim; ++i) {fprintf(fp,"%.16f %.16f\n",xcor[i],xcov[i]);} fclose(fp1); _mm_free(xcor); _mm_free(xcov); } /* Apply Time-Series analysis (Timsac) subroutine "MULSPE". The data itself is invariant from the point of view of specific subroutine i.e. "MULSPE". No descriptive statistics computations for this function. 
*/ __attribute__((hot)) __attribute__((aligned(32))) template<int32_t ndim, int32_t ldim, int32_t lagh> void cpu_perf_time_series_mulspe(const double * __restrict __attribute__((aligned(64))) mvdata, // Multidimensional data const char * __restrict fname, const std::string * __restrict data_types, const bool use_omp) { static_assert(ndim <= 11, "Number of dimensions can not exceed 11!!"); static_assert(ldim <= 100000, "Number of elements per dimension can not exceed 100000!!"); FILE * fp = fopen(fname,"a+"); if(__builtin_expect(NULL==fp,0)) { printf("File open error: %s\n",fname); std::exit(EXIT_FAILURE); } //const int32_t lagh = (int32_t)(2.0f*std::sqrt((float)ldim)); const std::size_t mvd_len = (std::size_t)(lagh*ndim*mdim); const int32_t totlen = ndim*ldim; __attribute__((aligned(64))) double xmean[ndim+6]; __attribute__((aligned(64))) double xstat[ndim]; // MULCOR data double * __restrict xcov = NULL; double * __restrict xcor = NULL; // MULSPE data double * __restrict xspec1 = NULL; double * __restrict xspec2 = NULL; double * __restrict xcoh1 = NULL; double * __restrict xcoh2 = NULL; if(use_omp) { #pragma omp parallel sections { #pragma omp section { xcov = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); } #pragma omp section { xcor = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); } #pragma omp section { xspec1 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); } #pragma omp section { xspec2 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); } #pragma omp section { xcoh1 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); } #pragma omp section { xcoh2 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); } } //Single thread (main) checks for null pointers. 
const bool isnull = (NULL==xcov) || (NULL==xcor) || (NULL==xspec1) || (NULL==xspec2) || (NULL==xcoh1) || (NULL==xcoh2); if(__builtin_expect(isnull,0)) {MALLOC_FAILED} } else { xcov = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); if(__bultin_expect(NULL==xcov,0)) {MALLOC_FAILED} xcor = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); if(__builtin_expect(NULL==xcor,0)) {MALLOC_FAILED} xspec1 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); if(__builtin_expect(NULL==xspec1,0)) {MALLOC_FAILED} xspec2 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); if(__builtin_expect(NULL==xspec2,0)) {MALLOC_FAILED} xcoh1 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); if(__builtin_expect(NULL==xcoh1,)) {MALLOC_FAILED} xcoh2 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64)); if(__builtin_expect(NULL==xcoh2,0)) {MALLOC_FAILED} } // Call MULCORF subroutine mulcorf_(&mvd_data[0],&totlen,&ndim,&lagh,&xmean[0],&xcov[0],&xcor[0]); // Call MULSPE subroutine mulspef_(&tot_len,&ndim,&lagh,&lagh,&xcov[0],&xspec1[0],&xspec2[0], &xstat[0],&xcoh1[0],&xcoh2[0]); for(int32_t i = 0; i != ndim; ++i) {fprintf(fp,"Data types: %s\n",data_types[i].c_str()); fprintf(fp, "Spectrum real part, imaginary part\n"); for(int32_t i = 0; i != (int32_t)(mvd_len); ++i) { fprintf(fp,"%.16f : %.16f\n",xspec1[i],xspec2[i]);} fprintf(fp, "Test Statistics\n"); for(int32_t i = 0; i != ndim; ++i) { fprintf(fp, "%.16f\n", xstat[i]);} fprintf(fp, "Simple coherence1, coherence2 \n"); for(int32_t i = 0; i != (int32_t)(mvd_len); ++i) {fprintf(fp,"%.16f , %.16f\n",xcoh1[i],xcoh2[i]);} fclose(fp); _mm_free(xcoh2); _mm_free(xcoh1); _mm_free(xspec2); _mm_free(xspec1); _mm_free(xcor); _mm_free(xcov); } /* Apply Time-Series analysis (Timsac) subroutine "UNIMAR". The data itself is invariant from the point of view of specific subroutine i.e. "UNIMAR". 
Attempt to calculate the descritpive statistics if result of Wilk-Shapiro normality test allows it. */ __attribute__((hot)) __attribute__((aligned(32))) template<int32_t len, int32_t lagh> void cpu_perf_time_series_unimar(const double * __restrict __attribute__((aligned(64))) data, const char * __restrict fname, const char * __restrict data_type) { static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!"); FILE * fp = fopen(fname,"a+"); if(__builtin_expect(NULL==fp,0)) { printf("File open error: %s\n",fname); std::exit(EXIT_FAILURE); } const int32_t len2 = len/2; // shapiro-wilk 'a' array length. constexpr float w_limit = 0.05f; __attribute__((aligned(64))) double xv[lagh+1]; __attribute__((aligned(64))) double xaic[lagh+1]; __attribute__((aligned(64))) double xdaic[lagh+1]; __attribute__((aligned(64))) double xa[lagh]; double xmean = 0.0; double xvar = 0.0; double xaicm = 0.0; double xvm = 0.0; int32_t xm = 0; char pad[4]; DESCRIPTIVE_STATISTICS_DATA const bool init = false; // swilk init argument. 
a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64)); if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED} df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64)); if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED} unimarf_(&data[0],&len,&lagh,&xmean,&xvar,&xv[0],&xaic[0],&xdaic[0], &xm,&xaicm,&xvm,&xa[0]); fprintf(fp,"Data type: %s, Method: Univariate Autoregressive AR Model Fitting\n",data_type); fprintf(fp,"\nmean=%.16f,var=%.16f,aicm=%.16f,vm=%.16f,xm=%d\n", xmean, xvar,xaicm,xvm,xm); fprintf(fp," V, AIC, DAIC\n"); for(int32_t i = 0; i != lagh+1; ++i) {fprintf(fp," %.16f %.16f %.16f %.16f\n",xv[i],xaic[i],xdaic[i]);} fprintf(fp, "A\n"); for(int32_t i = 0; i != lagh; ++i) {fprintf(fp," %.16f\n",xa[i]);} cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len); std::sort(df32,df32+len); swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault); if(ifault!=0) printf("swilk ifault value is: %d\n",ifault); fprintf(fp,"Data type: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",data_type,w,pw); if(pw<w_limit) fprintf(fp,"Warning!! 
-- 'pw' is less than normality limit -- Data is not normally distributed!!\n"); if(pw>w_limit) { fprintf(fp,"Descriptive Statistics calculations!!\n"); fprintf(fp,"====================================================\n"); srsd = relsd(&df32[0],len); fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd); svar = var(&df32[0],len); fprintf(fp,"Sample Variance: %.9f\n",svar); skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0); fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt); autocor = autoco(&df32[0],len); fprintf(fp,"Autocorrelation: %.9f\n",autocor); loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed); fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n", xmid,xmean,xmidm,xmed); smin = sample_min(&df32[0],len); fprintf(fp,"Sample Min: %.9f\n",smin); smax = sample_max(&df32[0],len); fprintf(fp,"Sample Max: %.9f\n",smax); scale(&df32[0],len,xrange,xsd,xrelsd,xvar); fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n", xrange,xsd,xrelsd,xvar); } fclose(fp); _mm_free(df32); _mm_free(a); } /* Apply Time-Series analysis (Timsac) subroutine "UNIBAR". The data itself is invariant from the point of view of specific subroutine i.e. "UNIBAR". */ __attribute__((hot)) __attribute__((aligned(32))) template<int32_t len,int32_t lagh> void cpu_perf_time_series_unibar(const double * __restrict __attribute__((aligned(64))) data, const char * __restrict fname, const char * __restrict data_type) { static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!"); FILE * fp = fopen(fname,"a+"); if(__builtin_expect(NULL==fp,0)) { printf("File open error: %s\n",fname); std::exit(EXIT_FAILURE); } const int32_t len2 = len/2; // shapiro-wilk 'a' array length. 
constexpr float w_limit = 0.05f; __attribute__((aligned(64))) double xv[lagh+1]; __attribute__((aligned(64))) double xaic[lagh+1]; __attribute__((aligned(64))) double xdaic[lagh+1]; __attribute__((aligned(64))) double xpa[lagh]; __attribute__((aligned(64))) double xbw[lagh+1]; __attribute__((aligned(64))) double xsbw[lagh]; __attribute__((aligned(64))) double xpab[lagh]; __attribute__((aligned(64))) double xa[lagh]; __attribute__((aligned(64))) double xpxx[128]; double xmean = 0.0; double xvar = 0.0; double xaicm = 0.0; double xvm = 0.0; double xaicb = 0.0; double xvb = 0.0; double xpn = 0.0; int32_t xm = 0; char pad[4]; DESCRIPTIVE_STATISTICS_DATA const bool init = false; // swilk init argument. a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64)); if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED} df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64)); if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED} unibarf_(&data[0],&len,&lagh,&xmean,&xvar,&xv[0],&xaic[0],&xdaic[0], &xm,&xaicm,&xvm,&xpa[0],&xbw[0],&xsbw[0],&xpab[0],&xaicb, &xvb,&xpn,&xa[0],&xpxx[0]); fprintf(fp,"Data type: %s, Method: Univariate Bayesian Method of AR Model Fitting\n",data_type); fprintf(fp,"\nxmean=%.16f,xvar=%.16f,xaicm=%.16f,xvm=%.16f,xaicb=%.16f,xvb=%.16f,xpn=%.16f,xm=%d\n",xmean, xvar,xaicm,xvm,xaicb,xvb,xpn,xm); fprintf(fp," V, AIC, DAIC, BW\n"); for(int32_t i = 0; i != (lagh+1); ++i) {fprintf(fp," %.16f %.16f %.16f %.16f\n",xv[i],xaic[i],xdaic[i],xbw[i]);} fprintf(fp, " PA, SBW, PAB, A\n"); for(int32_t i = 0; i != lagh; ++i) {fprintf(fp," %.16f %.16f %.16f %.16f\n", xpa[i],xsbw[i],xpab[i],xa[i]);} fprintf(fp, " PXX\n"); for(int32_t i = 0; i != 128; ++i) {fprintf(fp, "%.16f\n",pxx[i]);} cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len); std::sort(df32,df32+len); swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault); if(ifault!=0) printf("swilk ifault value is: %d\n",ifault); fprintf(fp,"Data type: %s -- Normality Test [Shapiro-Wilk] results: 
w=%.f9,pw=%.f9\n",data_type,w,pw); if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n"); if(pw>w_limit) { fprintf(fp,"Descriptive Statistics calculations!!\n"); fprintf(fp,"====================================================\n"); srsd = relsd(&df32[0],len); fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd); svar = var(&df32[0],len); fprintf(fp,"Sample Variance: %.9f\n",svar); skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0); fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt); autocor = autoco(&df32[0],len); fprintf(fp,"Autocorrelation: %.9f\n",autocor); loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed); fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n", xmid,xmean,xmidm,xmed); smin = sample_min(&df32[0],len); fprintf(fp,"Sample Min: %.9f\n",smin); smax = sample_max(&df32[0],len); fprintf(fp,"Sample Max: %.9f\n",smax); scale(&df32[0],len,xrange,xsd,xrelsd,xvar); fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n", xrange,xsd,xrelsd,xvar); } fclose(fp); _mm_free(df32); _mm_free(a); } /* Apply Time-Series analysis (Timsac) subroutine "EXSAR". The data itself is invariant from the point of view of specific subroutine i.e. "EXSAR". */ __attribute__((hot)) __attribute__((aligned(32))); template<int32_t len,int32_t lagh> void cpu_perf_time_series_exsar( const double * __restrict __attribute__((aligned(64))) data, const char * __restrict fname, const char * __restrict data_type) { static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!"); FILE * fp = fopen(fname,"a+"); if(__builtin_expect(NULL==fp,0)) { printf("File open error: %s\n",fname); std::exit(EXIT_FAILURE); } const int32_t len2 = len/2; // shapiro-wilk 'a' array length. 
constexpr float w_limit = 0.05f; __attribute__((aligned(64))) double xv[lagh+1]; __attribute__((aligned(64))) double xaic[lagh+1]; __attribute__((aligned(64))) double xdaic[lagh+1]; __attribute__((aligned(64))) double xa1[lagh]; __attribute__((aligned(64))) double xa2[lagh]; double xmean = 0.0; double xvar = 0.0; double xaicm = 0.0; double xsdm1 = 0.0; double xsdm2 = 0.0; char pad1[4]; int32_t xier = 0; int32_t xm = 0; char pad2[4]; DESCRIPTIVE_STATISTICS_DATA const bool init = false; // swilk init argument. a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64)); if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED} df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64)); if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED} exsarf_(&data[0],&len,&lagh,&xmean,&xvar,&xv[0],&xaic[0],&xdaic[0], &xm,&xaicm,&xsdm1,&xa1[0],&xsdm2,&xa2[0],&xier); fprintf(fp,"HW Metric: %s, Maximum Likelihood Estimation\n", metric_name); fprintf(fp,"xmean=%.16f,xvar=%.16f,xaicm=%.16f,xsdm1=%.16f,xsdm2=%.16f,xier=%d,xm=%d\n", xmean,xvar,xaicm,xsdm1,xsdm2,xier,xm); fprintf(fp,"V, AIC, DAIC \n"); for(int32_t i = 0; i != lagh1; ++i) {fprintf(fp," %.16f %.16f %.16f\n", xv[i],xaic[i],xdaic[i]);} fprintf(fp," A1, A2 \n"); for(int32_t i = 0; i != lagh; ++i) {fprintf(fp, " %.16f %.16f\n", xa1[i],xa2[i]);} cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len); std::sort(df32,df32+len); swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault); if(ifault!=0) printf("swilk ifault value is: %d\n",ifault); fprintf(fp,"Data type: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",data_type,w,pw); if(pw<w_limit) fprintf(fp,"Warning!! 
-- 'pw' is less than normality limit -- Data is not normally distributed!!\n"); if(pw>w_limit) { fprintf(fp,"Descriptive Statistics calculations!!\n"); fprintf(fp,"====================================================\n"); srsd = relsd(&df32[0],len); fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd); svar = var(&df32[0],len); fprintf(fp,"Sample Variance: %.9f\n",svar); skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0); fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt); autocor = autoco(&df32[0],len); fprintf(fp,"Autocorrelation: %.9f\n",autocor); loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed); fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n", xmid,xmean,xmidm,xmed); smin = sample_min(&df32[0],len); fprintf(fp,"Sample Min: %.9f\n",smin); smax = sample_max(&df32[0],len); fprintf(fp,"Sample Max: %.9f\n",smax); scale(&df32[0],len,xrange,xsd,xrelsd,xvar); fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n", xrange,xsd,xrelsd,xvar); } fclose(fp); _mm_free(df32); _mm_free(a); } /* Apply Time-Series analysis (Timsac) subroutine "BISPEC". The data itself is invariant from the point of view of specific subroutine i.e. "BISPEC". */ __attribute__((hot)) __attribute__((aligned(32))); template<int32_t len,int32_t lagh> void cpu_perf_time_series_bispec(const double * __restrict __attribute__((aligned(64))) data, const char * __restrict fname, const char * __restrict data_type, const bool use_omp) { static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!"); FILE * fp = fopen(fname,"a+"); if(__builtin_expect(NULL==fp,0)) { printf("File open error: %s\n",fname); std::exit(EXIT_FAILURE); } const int32_t lg12x = lagh*lagh+7; const std::size_t lagh_len = static_cast<std::size_t>(lg12x);\ const int32_t len2 = len/2; // shapiro-wilk 'a' array length. constexpr float w_limit = 0.05f; const bool init = false; // swilk init argument. 
__attribute__((aligned(64))) double acor[lagh+7]; __attribute__((aligned(64))) double acov[lagh+7]; __attribute__((aligned(64))) double pspec1[lagh+7]; __attribute__((aligned(64))) double psepc2[lagh+7]; __attribute__((aligned(64))) double sig[lagh+7]; double * __restrict mnt = NULL; double * __restrict ch = NULL; double * __restrict br = NULL; double * __restrict bi = NULL; double xmean = 0.0; double xrat = 0.0; // BISPECF result DESCRIPTIVE_STATISTICS_DATA if(use_omp) { #pragma omp parallel section { #pragma omp section { mnt = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64)); } #pragma omp section { ch = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64)); } #pragma omp section { br = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64)); } #pragma omp section { bi = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64)); } #pragma omp section { a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64)); } #pragma omp section { df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64)); } } const bool isnull = (NULL==mnt) || (NULL==ch) || (NULL==ch) || (NULL==br) || (NULL==bi) || (NULL==a) || (NULL==df32); if(__builtin_exppect(isnull,0)) {MALLOC_FAILED} } else { mnt = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64)); if(__builtin_expect(NULL==mnt,0)) {MALLOC_FAILED} ch = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64)); if(__builtin_expect(NULL==ch,0)) {MALLOC_FAILED} br = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64)); if(__builtin_expect(NULL==br,0)) {MALLOC_FAILED} bi = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64)); if(__builtin_expect(NULL==bi,0)) {MALLOC_FAILED} a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64)); if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED} df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64)); 
if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED} } thirmof_(&len,&lagh,&data[0],&xmean,&acov[0],&acor[0],&mnt[0]); bispecf_(&len,&lagh,&data[0],&mnt[0],&pspec1[0],&pspec2[0], &sig[0],&br[0],&bi[0],&xrat); fprintf(fp,"Data type: %s, Bi-Spectrum Decomposition\n",data_type); fprintf(fp,"xrat=%.16f\n",xrat); fprintf(fp," %s -- Smoothed Power Spectrum-1, Power Spectrum-2 and Significance\n", metric_name); for(int32_t i = 0; i != lagh; ++i) { fprintf(fp, "%.16f %.16f %.16f\n", psepc1[i],pspec2[i],sig[i]);} fprintf(fp, " %S -- Coherence, Real part, Imaginary part\n"); for(int32_t i = 0; i != lg12x; ++i) { fprintf(fp, "%.16f %.16f %.16f\n",ch[i],br[i],bi[i]);} cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len); std::sort(df32,df32+len); swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault); if(ifault!=0) printf("swilk ifault value is: %d\n",ifault); fprintf(fp,"Data type: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",data_type,w,pw); if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n"); if(pw>w_limit) { fprintf(fp,"Descriptive Statistics calculations!!\n"); fprintf(fp,"====================================================\n"); srsd = relsd(&df32[0],len); fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd); svar = var(&df32[0],len); fprintf(fp,"Sample Variance: %.9f\n",svar); skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0); fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt); autocor = autoco(&df32[0],len); fprintf(fp,"Autocorrelation: %.9f\n",autocor); loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed); fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n", xmid,xmean,xmidm,xmed); smin = sample_min(&df32[0],len); fprintf(fp,"Sample Min: %.9f\n",smin); smax = sample_max(&df32[0],len); fprintf(fp,"Sample Max: %.9f\n",smax); scale(&df32[0],len,xrange,xsd,xrelsd,xvar); fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n", 
xrange,xsd,xrelsd,xvar); } fclose(fp1); _mm_free(bi); _mm_free(br); _mm_free(ch); _mm_free(mnt); _mm_free(df32); _mm_free(a); } /* Apply Time-Series analysis (Timsac) subroutine "THIRMO". The data itself is invariant from the point of view of specific subroutine i.e. "THIRMO". */ __attribute__((hot)) __attribute__((aligned(32))); template<int32_t len,int32_t lagh> void cpu_perf_time_series_thirmo(const double * __restrict __attribute__((aligned(64))) data, const char * __restrict fname, const char * __restrict data_type) { static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!"); FILE * fp = fopen(fname,"a+"); if(__builtin_expect(NULL==fp,0)) { printf("File open error: %s\n",fname); std::exit(EXIT_FAILURE); } const int32_t lg12x = lagh*lagh+7; const std::size_t lagh_len = static_cast<std::size_t>(lg12x); const int32_t len2 = len/2; // shapiro-wilk 'a' array length. constexpr float w_limit = 0.05f; const bool init = false; // swilk init argument. __attribute__((aligned(64))) double acor[lagh+7]; __attribute__((aligned(64))) double acov[lagh+7]; double * __restrict mnt = NULL double xmean = 0.0; DESCRIPTIVE_STATISTICS_DATA mnt = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64)); if(__builtin_expect(NULL==mnt,0)) {MALLOC_FAILED} a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64)); if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED} df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64)); if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED} thirmof_(&len,&lagh,&data[0],&xmean,&acov[0],&acor[0],&mnt[0]); fprintf(fp,"Data type: %s Third Moments\n",data_type); fprintf(fp,"xmean=%.16f\n",xmean); fprintf(fp,"ACOV, ACOR\n"); for(int32_t i = 0; i != lagh; ++i) { fprintf(fp, "%.16f %.16f\n", acov[i],acor[i]);} fprintf(fp," %S -- Third Moment\n",metric_name); for(int32_t i = 0; i != lg12x; ++i) { fprintf(fp, "%.16f\n",mnt[i]);} cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len); 
std::sort(df32,df32+len); swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault); if(ifault!=0) printf("swilk ifault value is: %d\n",ifault); fprintf(fp,"Data type: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",data_type,w,pw); if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n"); if(pw>w_limit) { fprintf(fp,"Descriptive Statistics calculations!!\n"); fprintf(fp,"====================================================\n"); srsd = relsd(&df32[0],len); fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd); svar = var(&df32[0],len); fprintf(fp,"Sample Variance: %.9f\n",svar); skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0); fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt); autocor = autoco(&df32[0],len); fprintf(fp,"Autocorrelation: %.9f\n",autocor); loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed); fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n", xmid,xmean,xmidm,xmed); smin = sample_min(&df32[0],len); fprintf(fp,"Sample Min: %.9f\n",smin); smax = sample_max(&df32[0],len); fprintf(fp,"Sample Max: %.9f\n",smax); scale(&df32[0],len,xrange,xsd,xrelsd,xvar); fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n", xrange,xsd,xrelsd,xvar); } fclose(fp); _mm_free(mnt); } /* Apply Time-Series analysis (Timsac) subroutine "AUTOCOR". The data itself is invariant from the point of view of specific subroutine i.e. "AUTOCOR". */ __attribute__((hot)) __attribute__((aligned(32))); template<int32_t len,int32_t lagh> void cpu_perf_time_series_autocor(const double * __restrict __attribute__((aligned(64))) data, const char * __restrict fname, const char * __restrict data_type) { static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!"); FILE * fp = fopen(fname,"a+"); if(__builtin_expect(NULL==fp,0)) { printf("File open error: %s\n",fname); std::exit(EXIT_FAILURE); } const int32_t len2 = len/2; // shapiro-wilk 'a' array length. 
constexpr float w_limit = 0.05f; const bool init = false; // swilk init argument. __attribute__((aligned(64))) double acor[lagh+8]; __attribute__((aligned(64))) double acov[lagh+8]; double xmean = 0.0; DESCRIPTIVE_STATISTICS_DATA a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64)); if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED} df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64)); if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED} autocorf_(&data,&len,&acov[0],&acor[0],&lagh,&xmean); fprintf(fp,"Data type: %s\n",data_type); fprintf(fp,"xmean=%.16f\n",xmean); fprintf(fp," Series Autocorrelation and Autocovariance.\n"); for(int32_t i = 0; i != lagh; ++i) {fprintf(fp,"%.16f %.16f\n",acor[i],acov[i]);} cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len); std::sort(df32,df32+len); swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault); if(ifault!=0) printf("swilk ifault value is: %d\n",ifault); fprintf(fp,"Data type: %s -- Normality Test [Shapiro-Wilk] results: w=%.f9,pw=%.f9\n",data_type,w,pw); if(pw<w_limit) fprintf(fp,"Warning!! 
-- 'pw' is less than normality limit -- Data is not normally distributed!!\n"); if(pw>w_limit) { fprintf(fp,"Descriptive Statistics calculations!!\n"); fprintf(fp,"====================================================\n"); srsd = relsd(&df32[0],len); fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd); svar = var(&df32[0],len); fprintf(fp,"Sample Variance: %.9f\n",svar); skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0); fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt); autocor = autoco(&df32[0],len); fprintf(fp,"Autocorrelation: %.9f\n",autocor); loc(&df32[0],len,&xmid,&xmean,&xmidm,&xmed); fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n", xmid,xmean,xmidm,xmed); smin = sample_min(&df32[0],len); fprintf(fp,"Sample Min: %.9f\n",smin); smax = sample_max(&df32[0],len); fprintf(fp,"Sample Max: %.9f\n",smax); scale(&df32[0],len,xrange,xsd,xrelsd,xvar); fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n", xrange,xsd,xrelsd,xvar); } fclose(fp); _mm_free(df32); _mm_free(a); } #endif /*__GMS_CPU_PERF_TIME_SERIES_ANALYSIS_H__*/
PerturbHaloField.c
// Re-write of update_halo_pos from the original 21cmFAST

// ComputePerturbHaloField reads in the linear velocity field, and uses
// it to update halo locations with a corresponding displacement field
//
// Inputs:  'boxes' supplies the (1LPT and optionally 2LPT) velocity fields,
//          'halos' the catalogue of halo masses and grid coordinates.
// Output:  'halos_perturbed' receives freshly malloc'd mass/coordinate
//          arrays with the displaced positions on the HII (low-res) grid.
// Returns: 0 on success, or the status code thrown inside the Try/Catch
//          exception macros.
// Side effects: velocity fields in 'boxes' are scaled in place, used, and
//          then divided back to their pristine values before returning.
int ComputePerturbHaloField(float redshift, struct UserParams *user_params, struct CosmoParams *cosmo_params,
                            struct AstroParams *astro_params, struct FlagOptions *flag_options,
                            struct InitialConditions *boxes, struct HaloField *halos,
                            struct PerturbHaloField *halos_perturbed) {

    int status;

    Try{ // This Try brackets the whole function, so we don't indent.

        LOG_DEBUG("input value:");
        LOG_DEBUG("redshift=%f", redshift);
        #if LOG_LEVEL >= DEBUG_LEVEL
            writeUserParams(user_params);
            writeCosmoParams(cosmo_params);
            writeAstroParams(flag_options, astro_params);
            writeFlagOptions(flag_options);
        #endif

        // Makes the parameter structs visible to a variety of functions/macros
        // Do each time to avoid Python garbage collection issues
        Broadcast_struct_global_PS(user_params,cosmo_params);
        Broadcast_struct_global_UF(user_params,cosmo_params);

        omp_set_num_threads(user_params->N_THREADS);

        // NOTE(review): mass, z, xi, yi, zi, ct and dz below are declared but
        // never used in this function.
        float growth_factor, displacement_factor_2LPT, mass, xf, yf, zf, z, growth_factor_over_BOX_LEN,displacement_factor_2LPT_over_BOX_LEN;
        int i,j,k, i_halo,xi, yi, zi, DI, dimension;
        unsigned long long ct;

        float dz = 1e-10;

        LOG_DEBUG("Begin Initialisation");

        // Function for deciding the dimensions of loops when we could
        // use either the low or high resolution grids.
        switch(user_params->PERTURB_ON_HIGH_RES) {
            case 0:
                dimension = user_params->HII_DIM;
                break;
            case 1:
                dimension = user_params->DIM;
                break;
        }

        // ***************** END INITIALIZATION ***************** //

        init_ps();

        growth_factor = dicke(redshift); // normalized to 1 at z=0
        displacement_factor_2LPT = -(3.0/7.0) * growth_factor*growth_factor; // 2LPT eq. D8

        // Velocities are stored as displacement-per-growth; pre-dividing by
        // BOX_LEN turns them into comoving displacements in box-size units.
        growth_factor_over_BOX_LEN = growth_factor / user_params->BOX_LEN;
        displacement_factor_2LPT_over_BOX_LEN = displacement_factor_2LPT / user_params->BOX_LEN;

        // now add the missing factor of Ddot to velocity field
        #pragma omp parallel shared(boxes,dimension,growth_factor_over_BOX_LEN) private(i,j,k) num_threads(user_params->N_THREADS)
        {
            #pragma omp for
            for (i=0; i<dimension; i++){
                for (j=0; j<dimension; j++){
                    for (k=0; k<dimension; k++){
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            boxes->hires_vx[R_INDEX(i,j,k)] *= growth_factor_over_BOX_LEN;
                            boxes->hires_vy[R_INDEX(i,j,k)] *= growth_factor_over_BOX_LEN;
                            boxes->hires_vz[R_INDEX(i,j,k)] *= growth_factor_over_BOX_LEN;
                        }
                        else {
                            boxes->lowres_vx[HII_R_INDEX(i,j,k)] *= growth_factor_over_BOX_LEN;
                            boxes->lowres_vy[HII_R_INDEX(i,j,k)] *= growth_factor_over_BOX_LEN;
                            boxes->lowres_vz[HII_R_INDEX(i,j,k)] *= growth_factor_over_BOX_LEN;
                        }
                        // this is now comoving displacement in units of box size
                    }
                }
            }
        }

        // ************************************************************************* //
        //                          BEGIN 2LPT PART                                   //
        // ************************************************************************* //
        // reference: reference: Scoccimarro R., 1998, MNRAS, 299, 1097-1118 Appendix D
        if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
            // now add the missing factor in eq. D9
            #pragma omp parallel shared(boxes,displacement_factor_2LPT_over_BOX_LEN,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
            {
                #pragma omp for
                for (i=0; i<dimension; i++){
                    for (j=0; j<dimension; j++){
                        for (k=0; k<dimension; k++){
                            if(user_params->PERTURB_ON_HIGH_RES) {
                                boxes->hires_vx_2LPT[R_INDEX(i,j,k)] *= displacement_factor_2LPT_over_BOX_LEN;
                                boxes->hires_vy_2LPT[R_INDEX(i,j,k)] *= displacement_factor_2LPT_over_BOX_LEN;
                                boxes->hires_vz_2LPT[R_INDEX(i,j,k)] *= displacement_factor_2LPT_over_BOX_LEN;
                            }
                            else {
                                boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] *= displacement_factor_2LPT_over_BOX_LEN;
                                boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] *= displacement_factor_2LPT_over_BOX_LEN;
                                boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] *= displacement_factor_2LPT_over_BOX_LEN;
                            }
                            // this is now comoving displacement in units of box size
                        }
                    }
                }
            }
        }
        // ************************************************************************* //
        //                            END 2LPT PART                                   //
        // ************************************************************************* //

        // NOTE(review): local n_halos is never used; output arrays below are
        // malloc'd without a NULL check.
        unsigned long long n_halos;
        halos_perturbed->n_halos = halos->n_halos;

        halos_perturbed->halo_masses = malloc(sizeof(float) * halos->n_halos);
        halos_perturbed->halo_coords = malloc(sizeof(int) * halos->n_halos * 3);

        // ****************** END INITIALIZATION ******************************** //

        // NOTE(review): these diagnostics accumulators are never updated or read.
        float mean_correction = 0.0, mean_correction_2LPT = 0.0, mean_ratio = 0.0;
        float max_correction = 1e-10, max_correction_2LPT = 1e-10, max_ratio = 1e-10;
        int den = 0;

        #pragma omp parallel shared(boxes,halos,halos_perturbed) \
                        private(i_halo,i,j,k,xf,yf,zf) num_threads(user_params->N_THREADS)
        {
            #pragma omp for
            for (i_halo=0; i_halo<halos->n_halos; i_halo++){

                // convert location to fractional value
                xf = halos->halo_coords[i_halo*3+0]/(user_params->DIM + 0.);
                yf = halos->halo_coords[i_halo*3+1]/(user_params->DIM + 0.);
                zf = halos->halo_coords[i_halo*3+2]/(user_params->DIM + 0.);

                // determine halo position (downsampled if required)
                if(user_params->PERTURB_ON_HIGH_RES) {
                    i = halos->halo_coords[i_halo*3+0];
                    j = halos->halo_coords[i_halo*3+1];
                    k = halos->halo_coords[i_halo*3+2];
                }
                else {
                    i = xf * user_params->HII_DIM;
                    j = yf * user_params->HII_DIM;
                    k = zf * user_params->HII_DIM;
                }

                // get new positions using linear velocity displacement from z=INITIAL
                if(user_params->PERTURB_ON_HIGH_RES) {
                    xf += boxes->hires_vx[R_INDEX(i,j,k)];
                    yf += boxes->hires_vy[R_INDEX(i,j,k)];
                    zf += boxes->hires_vz[R_INDEX(i,j,k)];
                }
                else {
                    xf += boxes->lowres_vx[HII_R_INDEX(i,j,k)];
                    yf += boxes->lowres_vy[HII_R_INDEX(i,j,k)];
                    zf += boxes->lowres_vz[HII_R_INDEX(i,j,k)];
                }

                // 2LPT PART
                // add second order corrections
                if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
                    if(user_params->PERTURB_ON_HIGH_RES) {
                        xf -= boxes->hires_vx_2LPT[R_INDEX(i,j,k)];
                        yf -= boxes->hires_vy_2LPT[R_INDEX(i,j,k)];
                        zf -= boxes->hires_vz_2LPT[R_INDEX(i,j,k)];
                    }
                    else {
                        xf -= boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)];
                        yf -= boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)];
                        zf -= boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)];
                    }
                }

                // check if we wrapped around, note the casting to ensure < 1.00000
                // Positions are scaled by DI=10000 and rounded so the periodic
                // wrap happens on integers, avoiding float-precision edge cases.
                DI = 10000;
                xf = roundf(xf*DI);
                yf = roundf(yf*DI);
                zf = roundf(zf*DI);
                while (xf >= (float)DI){ xf -= DI;}
                while (xf < 0){ xf += DI;}
                while (yf >= (float)DI){ yf -= DI;}
                while (yf < 0){ yf += DI;}
                while (zf >= (float)DI){ zf -= DI;}
                while (zf < 0){ zf += DI;}
                xf = fabs(xf/(float)DI); // fabs gets rid of minus sign in -0.00000
                yf = fabs(yf/(float)DI);
                zf = fabs(zf/(float)DI);

                // store final positions on the low-res (HII) grid; the float ->
                // int assignment truncates to cell indices
                xf *= user_params->HII_DIM;
                yf *= user_params->HII_DIM;
                zf *= user_params->HII_DIM;
                halos_perturbed->halo_coords[i_halo*3+0] = xf;
                halos_perturbed->halo_coords[i_halo*3+1] = yf;
                halos_perturbed->halo_coords[i_halo*3+2] = zf;

                halos_perturbed->halo_masses[i_halo] = halos->halo_masses[i_halo];
            }
        }

        // Divide out multiplicative factor to return to pristine state
        #pragma omp parallel shared(boxes,growth_factor_over_BOX_LEN,dimension,displacement_factor_2LPT_over_BOX_LEN) private(i,j,k) num_threads(user_params->N_THREADS)
        {
            #pragma omp for
            for (i=0; i<dimension; i++){
                for (j=0; j<dimension; j++){
                    for (k=0; k<dimension; k++){
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            boxes->hires_vx[R_INDEX(i,j,k)] /= growth_factor_over_BOX_LEN;
                            boxes->hires_vy[R_INDEX(i,j,k)] /= growth_factor_over_BOX_LEN;
                            boxes->hires_vz[R_INDEX(i,j,k)] /= growth_factor_over_BOX_LEN;

                            if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
                                boxes->hires_vx_2LPT[R_INDEX(i,j,k)] /= displacement_factor_2LPT_over_BOX_LEN;
                                boxes->hires_vy_2LPT[R_INDEX(i,j,k)] /= displacement_factor_2LPT_over_BOX_LEN;
                                boxes->hires_vz_2LPT[R_INDEX(i,j,k)] /= displacement_factor_2LPT_over_BOX_LEN;
                            }
                        }
                        else {
                            boxes->lowres_vx[HII_R_INDEX(i,j,k)] /= growth_factor_over_BOX_LEN;
                            boxes->lowres_vy[HII_R_INDEX(i,j,k)] /= growth_factor_over_BOX_LEN;
                            boxes->lowres_vz[HII_R_INDEX(i,j,k)] /= growth_factor_over_BOX_LEN;

                            if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
                                boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] /= displacement_factor_2LPT_over_BOX_LEN;
                                boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] /= displacement_factor_2LPT_over_BOX_LEN;
                                boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] /= displacement_factor_2LPT_over_BOX_LEN;
                            }
                        }
                        // this is now comoving displacement in units of box size
                    }
                }
            }
        }

        fftwf_cleanup_threads();
        fftwf_cleanup();
        fftwf_forget_wisdom();

        // NOTE(review): %d may mismatch the declared type of n_halos
        // (the unused local above suggests unsigned long long) -- confirm
        // against the PerturbHaloField struct definition.
        LOG_DEBUG("Perturbed positions of %d Halos", halos_perturbed->n_halos);

    } // End of Try()
    Catch(status){
        return(status);
    }
    return(0);
}

// Release the arrays malloc'd into a PerturbHaloField by
// ComputePerturbHaloField and mark the catalogue as empty.
void free_phf(struct PerturbHaloField* halos){
    free(halos->halo_masses);
    free(halos->halo_coords);
    halos->n_halos = 0;
}
jacobi_omp.c
/*
 * Copyright (c) 2008, BSC (Barcelona Supercomputing Center)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the <organization> nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BSC ''AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> #include <time.h> #define NB 32 #define B 128 #define FALSE (0) #define TRUE (1) typedef double fp_type; typedef fp_type *vin; typedef fp_type *vout; typedef fp_type *bin; typedef fp_type *binout; fp_type *A[NB][NB]; fp_type *A_new[NB][NB]; fp_type *tmp[NB][NB]; void alloc_and_genmat() { int init_val, i, j, ii, jj; fp_type *p, *p_new; init_val = 1325; for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { A[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); A_new[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); tmp[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); if (A[ii][jj] == NULL || A_new[ii][jj] == NULL || tmp[ii][jj] == NULL) { printf("Out of memory\n"); exit(1); } p = A[ii][jj]; p_new = A_new[ii][jj]; for (i = 0; i < B; i++) { for (j = 0; j < B; j++) { init_val = (3125 * init_val) % 65536; (*p) = (fp_type)((init_val - 32768.0) / 16384.0); (*p_new) = (*p); p++; p_new++; } } } } } long usecs(void) { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec * 1000000 + t.tv_usec; } void clear(vout v) { int i, j, k; for (i = 0; i < B; i++) v[i] = (fp_type)0.0; } void getlastrow(bin A, vout v) { int j; for (j = 0; j < B; j++) v[j] = A[(B - 1) * B + j]; } void getlastcol(bin A, vout v) { int i; for (i = 0; i < B; i++) v[i] = A[i * B + B - 1]; } void getfirstrow(bin A, vout v) { int j; for (j = 0; j < B; j++) v[j] = A[0 * B + j]; } void getfirstcol(bin A, vout v) { int i; for (i = 0; i < B; i++) v[i] = A[i * B + 0]; } void jacobi(vin lefthalo, vin tophalo, vin righthalo, vin bottomhalo, bin A, binout A_new) { int i, j; fp_type tmp; fp_type left, top, right, bottom; for (i = 0; (i < B); i++) { for (j = 0; j < B; j++) { tmp = A[i * B + j]; left = (j == 0 ? lefthalo[j] : A[i * B + j - 1]); top = (i == 0 ? tophalo[i] : A[(i - 1) * B + j]); right = (j == B - 1 ? righthalo[i] : A[i * B + j + 1]); bottom = (i == B - 1 ? 
bottomhalo[i] : A[(i + 1) * B + j]); A_new[i * B + j] = 0.2 * (A[i * B + j] + left + top + right + bottom); } } } double maxdelta() { double dmax = -__DBL_MAX__; int ii, jj, i, j; #pragma omp parallel for schedule(static) reduction(max: dmax) for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { for (i = 0; (i < B); i++) { for (j = 0; j < B; j++) { double diff = fabs(A_new[ii][jj][i * B + j] - A[ii][jj][i * B + j]); if(diff > dmax) dmax = diff; } } } } return dmax; } void compute(int niters) { int iters; int ii, jj; fp_type lefthalo[B], tophalo[B], righthalo[B], bottomhalo[B]; double delta = 2.0; double epsilon = 1e-7; iters = 0; // for (iters = 0; iters < niters; iters++) while(iters < niters) { ++iters; #pragma omp parallel \ private(ii, jj, lefthalo, tophalo, righthalo, bottomhalo) \ shared(A, A_new) { #pragma omp for schedule(static) for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { if (ii > 0) getlastrow(A[ii - 1][jj], tophalo); else clear(tophalo); if (jj > 0) getlastcol(A[ii][jj - 1], lefthalo); else clear(lefthalo); if (ii < NB - 1) getfirstrow(A[ii + 1][jj], bottomhalo); else clear(bottomhalo); if (jj < NB - 1) getfirstcol(A[ii][jj + 1], righthalo); else clear(lefthalo); jacobi(lefthalo, tophalo, righthalo, bottomhalo, A[ii][jj], A_new[ii][jj]); } // jj } // ii } // end parallel delta = maxdelta(); printf("iteration %d: delta = %e\n", iters, delta); // yes, this is an inefficient copy // however, the library version requires you to do a copy in this way // on all of the component parts to avoid segmentation fault #pragma omp parallel for schedule(static) shared(A, A_new) for(int i = 0; i < NB; ++i) { for(int j = 0; j < NB; ++j) { for(int k = 0; k < B; ++k) for(int l = 0; l < B; ++l) A[i][j][k * B + l] = A_new[i][j][k * B + l]; } } } // iter } int main(int argc, char *argv[]) { int niters; // pp_time_t tm; // memset( &tm, 0, sizeof(tm) ); struct timespec start, end; if (argc > 1) { niters = atoi(argv[1]); } else niters = 1; 
alloc_and_genmat(); clock_gettime(CLOCK_MONOTONIC, &start); compute(niters); clock_gettime(CLOCK_MONOTONIC, &end); double time_taken = (end.tv_sec - start.tv_sec) * 1e9; time_taken = (time_taken + (end.tv_nsec - start.tv_nsec)) * 1e-9; printf("Running time = %g %s\n", time_taken, "s"); /* FILE *outFile; outFile = fopen("./jacobi_omp_values.txt", "w"); if (outFile == NULL) { fprintf(stderr, "Error writing to file\n"); } else { int ii, jj, i, j; for (ii = 0; ii < NB; ++ii) for (jj = 0; jj < NB; ++jj) for (i = 0; i < B; ++i) for (j = 0; j < B; ++j) fprintf(outFile, "%.15f\n", A[ii][jj][i * B + j]); fclose(outFile); } */ return 0; }
GB_emult_04.c
//------------------------------------------------------------------------------ // GB_emult_04: C<M>= A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C<M>= A.*B, M sparse/hyper, A and B bitmap/full. C has the same sparsity // structure as M, and its pattern is a subset of M. // ------------------------------------------ // C <M>= A .* B // ------------------------------------------ // sparse sparse bitmap bitmap (method: 04) // sparse sparse bitmap full (method: 04) // sparse sparse full bitmap (method: 04) // sparse sparse full full (method: 04) // TODO: this function can also do eWiseAdd, just as easily. // Just change the "&&" to "||" in the GB_emult_04_template. // If A and B are both full, eadd and emult are identical. 
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif

// free workspace only
#define GB_FREE_WORKSPACE                   \
{                                           \
    GB_WERK_POP (Work, int64_t) ;           \
    GB_WERK_POP (M_ek_slicing, int64_t) ;   \
}

// free workspace and the partially-built result C (used on error paths)
#define GB_FREE_ALL                         \
{                                           \
    GB_FREE_WORKSPACE ;                     \
    GB_phbix_free (C) ;                     \
}

GrB_Info GB_emult_04        // C<M>=A.*B, M sparse/hyper, A and B bitmap/full
(
    GrB_Matrix C,           // output matrix, static header
    const GrB_Type ctype,   // type of output matrix C
    const bool C_is_csc,    // format of output matrix C
    const GrB_Matrix M,     // sparse/hyper, not NULL
    const bool Mask_struct, // if true, use only the structure of M
    bool *mask_applied,     // if true, the mask was applied
    const GrB_Matrix A,     // input A matrix (bitmap/full)
    const GrB_Matrix B,     // input B matrix (bitmap/full)
    const GrB_BinaryOp op,  // op to perform C = op (A,B)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL && C->static_header) ;

    ASSERT_MATRIX_OK (M, "M for emult_04", GB0) ;
    ASSERT_MATRIX_OK (A, "A for emult_04", GB0) ;
    ASSERT_MATRIX_OK (B, "B for emult_04", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for emult_04", GB0) ;

    // M must be sparse or hypersparse, finished (no pending work, no zombies)
    ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    ASSERT (!GB_ZOMBIES (M)) ;

    // A and B must be bitmap or full (so A(:,j)/B(:,j) start at j*vlen)
    ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ;
    ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B) || GB_as_if_full (B)) ;

    // C inherits its sparsity structure from the mask M
    int C_sparsity = GB_sparsity (M) ;

    GBURBLE ("emult_04:(%s<%s>=%s.*%s) ",
        GB_sparsity_char (C_sparsity),
        GB_sparsity_char_matrix (M),
        GB_sparsity_char_matrix (A),
        GB_sparsity_char_matrix (B)) ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    GB_WERK_DECLARE (Work, int64_t) ;
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;
    int64_t *restrict Cp_kfirst = NULL ;
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;

    //--------------------------------------------------------------------------
    // get M, A, and B
    //--------------------------------------------------------------------------

    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mh = M->h ;
    const int64_t *restrict Mi = M->i ;
    // Mx is NULL for a structural mask: GB_mcast then treats entries as true
    const GB_void *restrict Mx = (Mask_struct) ? NULL : (GB_void *) M->x ;
    const int64_t vlen = M->vlen ;
    const int64_t vdim = M->vdim ;
    const int64_t nvec = M->nvec ;
    const int64_t mnz = GB_nnz (M) ;
    const size_t msize = M->type->size ;

    // bitmaps of A and B (NULL if the matrix is full)
    const int8_t *restrict Ab = A->b ;
    const int8_t *restrict Bb = B->b ;

    //--------------------------------------------------------------------------
    // check if C is iso and compute its iso value if it is
    //--------------------------------------------------------------------------

    const size_t csize = ctype->size ;
    GB_void cscalar [GB_VLA(csize)] ;
    bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;

    //--------------------------------------------------------------------------
    // allocate C->p and C->h
    //--------------------------------------------------------------------------

    GB_OK (GB_new (&C, true,  // sparse or hyper (same as M), static header
        ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
        C_sparsity, M->hyper_switch, nvec, Context)) ;
    int64_t *restrict Cp = C->p ;

    //--------------------------------------------------------------------------
    // slice the mask matrix M
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int M_ntasks, M_nthreads ;
    GB_SLICE_MATRIX (M, 8, chunk) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // one triple (Wfirst, Wlast, Cp_kfirst) of int64_t per task
    GB_WERK_PUSH (Work, 3*M_ntasks, int64_t) ;
    if (Work == NULL)
    { 
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    Wfirst = Work ;
    Wlast = Work + M_ntasks ;
    Cp_kfirst = Work + M_ntasks * 2 ;

    //--------------------------------------------------------------------------
    // count entries in C
    //--------------------------------------------------------------------------

    // This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).

    // TODO: if M is structural and A and B are both full, then C has exactly
    // the same pattern as M, the first phase can be skipped.

    int tid ;
    #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < M_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Mslice [tid] ;
        int64_t klast  = klast_Mslice  [tid] ;
        // Wfirst[tid]/Wlast[tid] hold the counts for the task's first and
        // last vectors, which may be shared with neighboring tasks; interior
        // vectors are owned exclusively, so their counts go straight to Cp.
        Wfirst [tid] = 0 ;
        Wlast  [tid] = 0 ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            // count the entries in C(:,j)
            int64_t j = GBH (Mh, k) ;
            int64_t pstart = j * vlen ;     // start of A(:,j) and B(:,j)
            int64_t pM, pM_end ;
            GB_get_pA (&pM, &pM_end, tid, k, kfirst, klast, pstart_Mslice,
                Mp, vlen) ;
            int64_t cjnz = 0 ;
            for ( ; pM < pM_end ; pM++)
            { 
                bool mij = GB_mcast (Mx, pM, msize) ;
                if (mij)
                { 
                    int64_t i = Mi [pM] ;
                    // C(i,j) exists iff both A(i,j) and B(i,j) exist
                    cjnz += (GBB (Ab, pstart + i)
                        &&  // TODO: for GB_add, use || instead
                        GBB (Bb, pstart + i)) ;
                }
            }
            if (k == kfirst)
            { 
                Wfirst [tid] = cjnz ;
            }
            else if (k == klast)
            { 
                Wlast [tid] = cjnz ;
            }
            else
            { 
                Cp [k] = cjnz ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // finalize Cp, cumulative sum of Cp and compute Cp_kfirst
    //--------------------------------------------------------------------------

    // merge the per-task counts of the shared first/last vectors into Cp,
    // then take the cumulative sum of Cp
    GB_ek_slice_merge1 (Cp, Wfirst, Wlast, M_ek_slicing, M_ntasks) ;
    GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
        Wfirst, Wlast, M_ek_slicing, M_ntasks, M_nthreads, Context) ;

    //--------------------------------------------------------------------------
    // allocate C->i and C->x
    //--------------------------------------------------------------------------

    int64_t cnz = Cp [nvec] ;
    // set C->iso = C_iso   OK
    GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;

    //--------------------------------------------------------------------------
    // copy pattern into C
    //--------------------------------------------------------------------------

    // TODO: could make these components of C shallow instead

    if (GB_IS_HYPERSPARSE (M))
    { 
        // copy M->h into C->h
        GB_memcpy (C->h, Mh, nvec * sizeof (int64_t), M_nthreads) ;
    }

    C->nvec = nvec ;
    C->jumbled = M->jumbled ;   // C is jumbled if M is jumbled
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // get the opcode
    //--------------------------------------------------------------------------

    GB_Opcode opcode = op->opcode ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    bool op_is_first  = (opcode == GB_FIRST_binop_code) ;
    bool op_is_second = (opcode == GB_SECOND_binop_code) ;
    bool op_is_pair   = (opcode == GB_PAIR_binop_code) ;
    GB_Type_code ccode = ctype->code ;

    //--------------------------------------------------------------------------
    // check if the values of A and/or B are ignored
    //--------------------------------------------------------------------------

    // With C = ewisemult (A,B), only the intersection of A and B is used.
    // If op is SECOND or PAIR, the values of A are never accessed.
    // If op is FIRST  or PAIR, the values of B are never accessed.
    // If op is PAIR, the values of A and B are never accessed.
    // Contrast with ewiseadd.

    // A is passed as x, and B as y, in z = op(x,y)
    bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
    bool B_is_pattern = op_is_first  || op_is_pair || op_is_positional ;

    //--------------------------------------------------------------------------
    // using a built-in binary operator (except for positional operators)
    //--------------------------------------------------------------------------

    #define GB_PHASE_2_OF_2

    if (C_iso)
    { 

        //----------------------------------------------------------------------
        // C is iso
        //----------------------------------------------------------------------

        // Cx [0] = cscalar = op (A,B)
        GB_BURBLE_MATRIX (C, "(iso emult) ") ;
        memcpy (C->x, cscalar, csize) ;

        // pattern of C = set intersection of pattern of A and B
        #define GB_ISO_EMULT
        #include "GB_emult_04_template.c"

    }
    else
    {

        //----------------------------------------------------------------------
        // C is non-iso
        //----------------------------------------------------------------------

        bool done = false ;

        #ifndef GBCOMPACT

            //------------------------------------------------------------------
            // define the worker for the switch factory
            //------------------------------------------------------------------

            #define GB_AemultB_04(mult,xname) GB (_AemultB_04_ ## mult ## xname)

            #define GB_BINOP_WORKER(mult,xname)                             \
            {                                                               \
                info = GB_AemultB_04(mult,xname) (C, M, Mask_struct, A, B,  \
                    Cp_kfirst, M_ek_slicing, M_ntasks, M_nthreads) ;        \
                done = (info != GrB_NO_VALUE) ;                             \
            }                                                               \
            break ;

            //------------------------------------------------------------------
            // launch the switch factory
            //------------------------------------------------------------------

            GB_Type_code xcode, ycode, zcode ;
            if (!op_is_positional &&
                GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
                    op, false, &opcode, &xcode, &ycode, &zcode) &&
                ccode == zcode)
            { 
                // fall into the hard-coded kernel if one matches
                #define GB_NO_PAIR
                #include "GB_binop_factory.c"
            }

        #endif

        //----------------------------------------------------------------------
        // generic worker
        //----------------------------------------------------------------------

        if (!done)
        { 
            // no hard-coded kernel: use the generic (function-pointer) worker
            GB_BURBLE_MATRIX (C, "(generic emult_04: %s) ", op->name) ;
            GB_ewise_generic (C, op, NULL, 0, 0,
                NULL, NULL, NULL, C_sparsity, GB_EMULT_METHOD4, Cp_kfirst,
                M_ek_slicing, M_ntasks, M_nthreads,
                NULL, 0, 0, NULL, 0, 0,
                M, Mask_struct, false, A, B, Context) ;
        }
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    GB_OK (GB_hypermatrix_prune (C, Context)) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    ASSERT_MATRIX_OK (C, "C output for emult_04", GB0) ;
    (*mask_applied) = true ;
    return (GrB_SUCCESS) ;
}
CRegionColoring.h
///////////////////////////////////////////////////////////////////////////////
// $Id$
//
// 3DimViewer
// Lightweight 3D DICOM viewer.
//
// Copyright 2008-2016 3Dim Laboratory s.r.o.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef CRegionColoring_H
#define CRegionColoring_H

///////////////////////////////////////////////////////////////////////////////
// include files

#include "data/CColoringFunc.h"
#include "data/CObjectHolder.h"
#include "data/CSnapshot.h"
#include <data/CStorageInterface.h>
#include <data/storage_ids_core.h>

// STL
#include <vector>

namespace data
{

///////////////////////////////////////////////////////////////////////////////
//! Additional region info (name + visibility/selection/auxiliary flags).

class CRegionInfo
{
public:
    //! Default constructor.
    CRegionInfo()
        : m_ssName("Unnamed")
        , m_bVisible(true)
        , m_bSelected(false)
        , m_bAuxiliary(false)
    {}

    //! Just another constructor.
    CRegionInfo(const std::string& Name)
        : m_ssName(Name)
        , m_bVisible(true)
        , m_bSelected(false)
        , m_bAuxiliary(false)
    {}

    //! Changes the region name.
    CRegionInfo& setName(const std::string& ssName)
    {
        m_ssName = ssName;
        return *this;
    }

    //! Changes region visibility.
    CRegionInfo& setVisibility(bool bVisible)
    {
        m_bVisible = bVisible;
        return *this;
    }

    //! Sets region selection flag.
    CRegionInfo& setSelected(bool bSelected)
    {
        m_bSelected = bSelected;
        return *this;
    }

    //! Sets region auxiliary flag.
    CRegionInfo& setAuxiliary(bool bAuxiliary)
    {
        m_bAuxiliary = bAuxiliary;
        return *this;
    }

    //! Returns region name.
    const std::string& getName() const { return m_ssName; }

    //! Returns if the region is visible.
    bool isVisible() const { return m_bVisible; }

    //! Returns if the region is selected.
    bool isSelected() const { return m_bSelected; }

    //! Returns if the region is auxiliary.
    bool isAuxiliary() const { return m_bAuxiliary; }

    //! Serialize.
    //! NOTE: only the name and visibility are written; the selection and
    //! auxiliary flags are deliberately excluded (see commented-out lines),
    //! so they are transient state that does not survive save/load.
    template < class tpSerializer >
    void serialize(vpl::mod::CChannelSerializer<tpSerializer> & Writer)
    {
        Writer.write( m_ssName );
        Writer.write( (unsigned char)m_bVisible );
        //Writer.write((unsigned char)m_bSelected);
        //Writer.write((unsigned char)m_bAuxiliary);
    }

    //! Deserialize (must mirror serialize() above).
    template < class tpSerializer >
    void deserialize(vpl::mod::CChannelSerializer<tpSerializer> & Reader)
    {
        Reader.read( m_ssName );
        unsigned char b = 0;
        Reader.read( b );
        m_bVisible = b;
        /*b = 0; Reader.read(b); m_bSelected = b;
        b = 0; Reader.read(b); m_bAuxiliary = b;*/
    }

protected:
    //! Region name.
    std::string m_ssName;

    //! Visibility flag.
    bool m_bVisible;

    //! Selection flag.
    bool m_bSelected;

    //! Auxiliary region flag (e.g. region created during supervoxels segmentation).
    bool m_bAuxiliary;
};

////////////////////////////////////////////////////////////////////////////////////////////////////
//\class CRegionColoringSnapshot
//
//\brief Region coloring snapshot (colors + region infos) for undo/redo.
////////////////////////////////////////////////////////////////////////////////////////////////////

class CRegionColoringSnapshot : public CSnapshot
{
public:
    //! Constructor
    CRegionColoringSnapshot( int type, CUndoProvider * provider = NULL )
        : CSnapshot( type, provider )
        , m_Colors(1)
    {}

    //! Destructor
    ~CRegionColoringSnapshot(){}

    //! Each snapshot object must return its data size in bytes
    virtual long getDataSize()
    {
        return sizeof(CColorVector4b) * m_Colors.getSize() + sizeof( CRegionInfo ) * m_Regions.size();
    }

protected:
    //! Vector of region info structures.
    typedef std::vector<CRegionInfo> tRegions;

    //! Vector of assigned colors.
    CColorVector4b m_Colors;

    //! Regions
    tRegions m_Regions;

    // Friend class - only CRegionColoring fills and reads the snapshot.
    friend class CRegionColoring;
};

///////////////////////////////////////////////////////////////////////////////
//! Functor for coloring of segmented density data.

class CRegionColoring : public CColoringFunc4b, public CUndoProvider
{
public:
    //! Smart pointer type.
    //! - Declares type tSmartPtr.
    VPL_SHAREDPTR(CRegionColoring);

    //! Initial number of regions.
    enum { NUM_OF_REGIONS = 10 };

    //! Default color transparency.
    enum { ALPHA = 128 };

public:
    //! Default constructor.
    CRegionColoring();

    //! Destructor.
    virtual ~CRegionColoring() {}

    //! Changes the number of regions.
    void resize(int Size, bool bRandomColor = true)
    {
        // Resize the vector of colors
        m_Colors.resize(Size, bRandomColor, ALPHA);

        // Region info
        m_Regions.resize(tRegions::size_type(Size));
    }

    //! Returns the number of regions.
    //! The color vector is the authoritative count (m_Regions is kept in sync).
    int getNumOfRegions() const { return m_Colors.getSize(); }

    //! Returns label of the active region.
    int getActiveRegion() const { return m_Active; }

    //! Changes the active region (out-of-range indices are ignored).
    CRegionColoring& setActiveRegion(int i)
    {
        m_Active = (i >= 0 && i < getNumOfRegions()) ? i : m_Active;
        return *this;
    }

    //! Returns true if a region is visible (false for out-of-range indices).
    bool isVisible(int i) const
    {
        // return m_Regions[i].isVisible();
        return (i < 0 || i >= getNumOfRegions()) ? false : m_Regions[i].isVisible();
    }

    //! Changes region visibility (out-of-range indices are ignored).
    CRegionColoring& setVisibility(int i, bool bVisible)
    {
        if( i < 0 || i >= getNumOfRegions() )
        {
            return *this;
        }
        m_Regions[i].setVisibility(bVisible);
        return *this;
    }

    //! Changes a region color.
    CRegionColoring& setColor(int i, const tColor& Color)
    {
        // m_Colors.setColor(i, Color);
        m_Colors.setColorSafe(i, Color);
        return *this;
    }

    //! Returns a region color.
    const tColor& getColor(int i) const
    {
        // return m_Colors.getColor(i);
        return m_Colors.getColorSafe(i);
    }

    //! Coloring function.
virtual tColor makeColor(const tPixel& Density) { int i = int(Density); if( i < 0 || i >= getNumOfRegions() || !m_Regions[i].isVisible() ) { return m_DummyColor; } return m_Colors.getColor(i); } //! Returns type of the coloring function. virtual int getType() const { return ColoringFunc::REGION_COLORING; } //! Does object contain relevant data? virtual bool hasData(){ return true; } //! Returns reference to the region info. CRegionInfo& getRegionInfo(int i) { // return m_Regions[i]; return (i < 0 || i >= getNumOfRegions()) ? m_DummyRegion : m_Regions[i]; } //! Returns reference to the region info. const CRegionInfo& getRegionInfo(int i) const { // return m_Regions[i]; return (i < 0 || i >= getNumOfRegions()) ? m_DummyRegion : m_Regions[i]; } void setRegionInfo(int i, const CRegionInfo &info) { if (i >= 0 || i < getNumOfRegions()) { m_Regions[i] = info; } } //! Regenerates the object state according to any changes in the data storage. void update(const CChangedEntries& VPL_UNUSED(Changes)) { // Does nothing... } //! Initializes the object to its default state. 
    void init()
    {
        resize(NUM_OF_REGIONS);

        // Initialize colors
        // Region 0 ("Not classified") is fully transparent; the remaining
        // regions get distinct colors with the default ALPHA transparency.
        m_Colors[0] = tColor(0, 0, 0, 0);
        m_Colors[1] = tColor(255, 128, 128, ALPHA);
        m_Colors[2] = tColor(128, 255, 128, ALPHA);
        m_Colors[3] = tColor(128, 128, 255, ALPHA);
        m_Colors[4] = tColor(255, 255, 128, ALPHA);
        m_Colors[5] = tColor(128, 255, 255, ALPHA);
        m_Colors[6] = tColor(255, 128, 255, ALPHA);
        m_Colors[7] = tColor(255, 128, 64, ALPHA);
        m_Colors[8] = tColor(0, 128, 128, ALPHA);
        m_Colors[9] = tColor(128, 128, 64, ALPHA);

        // Initialize regions
        m_Regions[0].setName("Not classified").setVisibility(true).setSelected(false).setAuxiliary(false);
        m_Regions[1].setName("Region 1").setVisibility(true).setSelected(false).setAuxiliary(false);
        m_Regions[2].setName("Region 2").setVisibility(true).setSelected(false).setAuxiliary(false);
        m_Regions[3].setName("Region 3").setVisibility(true).setSelected(false).setAuxiliary(false);
        m_Regions[4].setName("Region 4").setVisibility(true).setSelected(false).setAuxiliary(false);
        m_Regions[5].setName("Region 5").setVisibility(true).setSelected(false).setAuxiliary(false);
        m_Regions[6].setName("Region 6").setVisibility(true).setSelected(false).setAuxiliary(false);
        m_Regions[7].setName("Region 7").setVisibility(true).setSelected(false).setAuxiliary(false);
        m_Regions[8].setName("Region 8").setVisibility(true).setSelected(false).setAuxiliary(false);
        m_Regions[9].setName("Region 9").setVisibility(true).setSelected(false).setAuxiliary(false);

        // Region 1 is the initially active (editable) region.
        m_Active = 1;
    }

    //! Returns true if changes of a given parent entry may affect this object.
    bool checkDependency(CStorageEntry * VPL_UNUSED(pParent)) { return true; }

    // Undo providing

    //! Create snapshot of the current state.
    virtual CSnapshot * getSnapshot( CSnapshot * VPL_UNUSED(snapshot) )
    {
        CRegionColoringSnapshot * s = new CRegionColoringSnapshot( data::UNDO_ALL, this );

        // Store regions
        for( tRegions::iterator r = m_Regions.begin(); r != m_Regions.end(); ++r )
            s->m_Regions.push_back( *r );

        // Store colors
        s->m_Colors.resize( m_Colors.getSize() );
        for( int i = 0; i < m_Colors.getSize(); ++i )
            s->m_Colors.setColor( i, m_Colors.getColor( i ) );

        return s;
    }

    //! Restore state from the snapshot
    virtual void restore( CSnapshot * snapshot )
    {
        // Silently ignore null or foreign snapshot types.
        if( snapshot == 0 ) return;
        CRegionColoringSnapshot * s = dynamic_cast< CRegionColoringSnapshot * >( snapshot );
        if( s == 0 ) return;

        // Restore regions
        m_Regions.clear();
        for( tRegions::iterator r = s->m_Regions.begin(); r != s->m_Regions.end(); ++r )
            m_Regions.push_back( *r );

        // Restore colors
        m_Colors.resize( s->m_Colors.getSize() );
        for( int i = 0; i < s->m_Colors.getSize(); ++i )
            m_Colors.setColor( i, s->m_Colors.getColor( i ) );
    }

    //! Serialize.  Stream layout: version, colors, region count, regions,
    //! active-region index.  deserialize() below must mirror this exactly.
    template < class tpSerializer >
    void serialize(vpl::mod::CChannelSerializer<tpSerializer> & Writer)
    {
        Writer.beginWrite( *this );

        Writer.write( (vpl::sys::tUInt32) 1 ); // version

        m_Colors.serialize( Writer );

        // Write regions vector size
        Writer.write( (vpl::sys::tUInt32)m_Regions.size() );

        // Serialize regions
        tRegions::iterator it, itEnd( m_Regions.end() );
        for( it = m_Regions.begin(); it != itEnd; ++it )
            it->serialize( Writer );

        //m_DummyRegion.serialize( Writer );

        Writer.write( (vpl::sys::tInt32)m_Active );

        //m_DummyColor.serialize( Writer );

        Writer.endWrite(*this);
    }

    //! Deserialize (mirror of serialize() above).
    template < class tpSerializer >
    void deserialize(vpl::mod::CChannelSerializer<tpSerializer> & Reader)
    {
        Reader.beginRead(*this);

        // NOTE(review): the version tag is read but not checked; presumably
        // only format version 1 exists so far.
        vpl::sys::tUInt32 ver = 0;
        Reader.read( ver ); // version

        m_Colors.deserialize( Reader );

        // Read and set regions vector size
        vpl::sys::tUInt32 size = 0;
        Reader.read( size );
        m_Regions.resize( size );

        // Deserialize regions
        tRegions::iterator it, itEnd( m_Regions.end() );
        for( it = m_Regions.begin(); it != itEnd; ++it )
            it->deserialize( Reader );

        //m_DummyRegion.deserialize( Reader );

        vpl::sys::tInt32 active = 0;
        Reader.read( active );
        m_Active = active;

        //m_DummyColor.deserialize( Reader );

        Reader.endRead( *this );
    }

    //! Colorize whole slice image
    virtual void colorize(vpl::img::CRGBImage &rgbImage, const vpl::img::CDImage &densityImage, const data::CSlicePropertyContainer &properties)
    {
        // region coloring does nothing here
    }

    //! Colorize a slice from its region-label image: selected regions are
    //! painted solid yellow, others are blended over the existing pixel.
    void colorize(vpl::img::CRGBImage &rgbImage, const vpl::img::CImage16 &regionImage)
    {
        // Clamp to the overlapping area of both images.
        const int xSize = std::min(regionImage.getXSize(),rgbImage.getXSize());
        const int ySize = std::min(regionImage.getYSize(),rgbImage.getYSize());
#pragma omp parallel for
        for (int y = 0; y < ySize; ++y)
        {
            for (int x = 0; x < xSize; ++x)
            {
                if (getRegionInfo(regionImage(x, y)).isSelected())
                {
                    // Selected region: highlight with opaque yellow.
                    tColor newColor(255, 255, 0, 255);
                    rgbImage(x, y) = *(reinterpret_cast<vpl::img::tRGBPixel *>(&newColor));
                }
                else
                {
                    // NOTE(review): tColor and tRGBPixel are punned via
                    // reinterpret_cast — presumably both are 4-byte RGBA with
                    // identical layout; verify if either type changes.
                    vpl::img::CRGBPixel pixel = rgbImage(x, y);
                    tColor prevColor = *(reinterpret_cast<tColor *>(&pixel));
                    tColor currColor = makeColor(regionImage(x, y));
                    tColor newColor = blendColors(currColor, prevColor);
                    rgbImage(x, y) = *(reinterpret_cast<vpl::img::tRGBPixel *>(&newColor));
                }
            }
        }
    }

protected:
    //! Vector of region info structures.
    typedef std::vector<CRegionInfo> tRegions;

protected:
    //! Vector of assigned colors.
    CColorVector4b m_Colors;

    //! Vector of region info structures.
    tRegions m_Regions;

    //! Dummy region.
    CRegionInfo m_DummyRegion;

    //! Index of the active region.
    int m_Active;

    //! Dummy color.
    tColor m_DummyColor;
};

////////////////////////////////////////////////////////////////////////////////////////////////////
//!\brief Serialization wrapper.
////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_SERIALIZATION_WRAPPER( CRegionColoring )

namespace Storage
{
    //! Region coloring.
    DECLARE_OBJECT(RegionColoring, CRegionColoring, CORE_STORAGE_REGION_COLORING_ID);
}

} // namespace data

#endif // CRegionColoringFunc_H

///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
trsm_x_sky_u_hi_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif

// Triangular solve with multiple right-hand sides, skyline (SKY) storage:
//   y := alpha * inv(U) * x
// where U is the upper-triangular part of A with an implied unit diagonal
// (the diagonal entry is never read below — consistent with the _u_ "unit"
// variant this file name denotes).  x and y are dense, row-major, with
// leading dimensions ldx/ldy and `columns` right-hand sides.
//
// Layout assumption (TODO confirm against the SKY format definition):
// A->pointers[ic]..A->pointers[ic+1] delimits column ic's stored values,
// ending at the diagonal, so the entry of row c in column ic sits at
// offset end - (ic - c) - 1 when it lies inside the stored skyline.
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    int num_thread = alpha_get_thread_num();

    // Right-hand sides are independent, so parallelize across them.
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        // Backward substitution: solve rows from last to first.
        for (ALPHA_INT c = A->cols - 1; c >= 0; c--)
        {
            // temp accumulates U(c,ic) * y(ic) over the already-solved
            // entries ic > c.
            ALPHA_Number temp;
            alpha_setzero(temp);
            for (ALPHA_INT ic = A->cols - 1; ic > c; ic--)
            {
                ALPHA_INT start = A->pointers[ic];
                ALPHA_INT end = A->pointers[ic + 1];
                ALPHA_INT eles_num = ic - c;
                // Row c contributes only if it falls inside column ic's
                // stored skyline; rows above the skyline are implicit zeros.
                if(end - eles_num - 1 >= start)
                    alpha_madde(temp, A->values[end - eles_num - 1], y[ic * ldy + out_y_col]);
            }
            // Unit diagonal: y(c) = alpha * x(c) - temp (no division).
            ALPHA_Number t;
            alpha_mul(t, alpha, x[c * ldx + out_y_col]);
            alpha_sub(y[c * ldy + out_y_col], t, temp);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
9915.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for for (t4 = 1; t4 <= nx - 1; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < nx - 1 ? t4 + 7 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 8) for (t10 = t8; t10 <= (ny - 1 < t8 + 7 ? ny - 1 : t8 + 7); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for for (t4 = 0; t4 <= nx - 1; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < nx - 1 ? t4 + 7 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 8) for (t10 = t8; t10 <= (ny - 1 < t8 + 7 ? ny - 1 : t8 + 7); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for for (t4 = 0; t4 <= nx - 2; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < nx - 2 ? t4 + 7 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 8) for (t10 = t8; t10 <= (ny - 2 < t8 + 7 ? ny - 2 : t8 + 7); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
calculate_global_physical_properties.h
#ifndef CALCULATE_GLOBAL_PHYSICAL_PROPERTIES_H #define CALCULATE_GLOBAL_PHYSICAL_PROPERTIES_H // /* External includes */ // System includes // Project includes #include "utilities/timer.h" #include "custom_utilities/create_and_destroy.h" #include "custom_elements/Particle_Contact_Element.h" #include "includes/variables.h" /* System includes */ #include <limits> #include <iostream> #include <iomanip> /* External includes */ #ifdef _OPENMP #include <omp.h> #endif /* Project includes */ #include "includes/define.h" #include "utilities/openmp_utils.h" namespace Kratos { class SphericElementGlobalPhysicsCalculator { public: typedef ModelPart::ElementsContainerType ElementsArrayType; KRATOS_CLASS_POINTER_DEFINITION(SphericElementGlobalPhysicsCalculator); /// Default constructor. SphericElementGlobalPhysicsCalculator(ModelPart& r_model_part) { mInitialCenterOfMassAndMass = CalculateCenterOfMass(r_model_part); mInitialMass = CalculateTotalMass(r_model_part); } /// Destructor. virtual ~SphericElementGlobalPhysicsCalculator(){} //*************************************************************************************************************** //*************************************************************************************************************** double CalculateTotalVolume(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double added_volume = 0.0; #pragma omp parallel for reduction(+ : added_volume) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if (it->GetGeometry()[0].Is(BLOCKED)) { // we exclude blocked elements from the volume calculation (e.g., inlet injectors) continue; } if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { SphericParticle& r_spheric_particle = 
dynamic_cast<Kratos::SphericParticle&> (*it); const double particle_radius = r_spheric_particle.GetRadius(); added_volume += 4.0 / 3.0 * Globals::Pi * particle_radius * particle_radius * particle_radius; } } } return added_volume; } //*************************************************************************************************************** //*************************************************************************************************************** // Returns the minimum value of a double variable in the model part. double CalculateMaxNodalVariable(ModelPart& r_model_part, const Variable<double>& r_variable) { ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements(); KRATOS_ERROR_IF(pElements.size() == 0) << "Cannot compute maximum of the required nodal variable. Empty model part. Could not compute the maximum of the required variable " << r_variable << std::endl; ElementsArrayType::iterator it_begin = pElements.ptr_begin(); KRATOS_ERROR_IF_NOT(it_begin->GetGeometry()[0].SolutionStepsDataHas(r_variable)) << "Cannot compute maximum of the required nodal variable. 
Missing nodal variable " << r_variable << std::endl; std::vector<double> max_values; double max_val = - std::numeric_limits<double>::max(); max_values.resize(ParallelUtilities::GetNumThreads()); for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ max_values[k] = max_val; } OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), pElements.size(), mElementsPartition); unsigned int elem_counter; #pragma omp parallel for private(elem_counter) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ elem_counter = mElementsPartition[k]; for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ max_values[k] = std::max(max_values[k], (it)->GetGeometry()[0].FastGetSolutionStepValue(r_variable)); elem_counter++; } } // getting the maximum between threads: for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ max_val = std::max(max_val, max_values[k]); } return max_val; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateMinNodalVariable(ModelPart& r_model_part, const Variable<double>& r_variable) { ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements(); KRATOS_ERROR_IF(pElements.size() == 0) << "Cannot compute minimum of the required nodal variable. Empty model part. Could not compute the maximum of the required variable " << r_variable << std::endl; ElementsArrayType::iterator it_begin = pElements.ptr_begin(); KRATOS_ERROR_IF_NOT(it_begin->GetGeometry()[0].SolutionStepsDataHas(r_variable)) << "Cannot compute minimum of the required nodal variable. 
Missing variable " << r_variable << std::endl; std::vector<double> min_values; double min_val = std::numeric_limits<double>::max(); min_values.resize(ParallelUtilities::GetNumThreads()); for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ min_values[k] = min_val; } OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), pElements.size(), mElementsPartition); unsigned int elem_counter; #pragma omp parallel for private(elem_counter) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ elem_counter = mElementsPartition[k]; for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ min_values[k] = std::min(min_values[k], (it)->GetGeometry()[0].FastGetSolutionStepValue(r_variable)); elem_counter++; } } // getting the minimum between threads: for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ min_val = std::min(min_val, min_values[k]); } return min_val; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateD50(ModelPart& r_model_part) { const unsigned int size = r_model_part.GetCommunicator().LocalMesh().Elements().size(); OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), size, mElementsPartition); std::vector<double> radii; radii.resize(size); unsigned int particle_counter = 0; #pragma omp parallel for private(particle_counter) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ particle_counter = mElementsPartition[k]; for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ SphericParticle& r_spheric_particle = dynamic_cast<Kratos::SphericParticle&> (*it); radii[particle_counter] = r_spheric_particle.GetRadius(); particle_counter++; } } if (particle_counter) { 
std::sort(radii.begin(), radii.end()); int half = div(size, 2).quot; bool even = (size%2 == 0); double d50 = even ? 2 * radii[half] : radii[half] + radii[half + 1]; return d50; } else { return 0.00; } } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateTotalMass(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(),r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double added_mass = 0.0; #pragma omp parallel for reduction(+ : added_mass) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_mass = (it)->GetGeometry()[0].FastGetSolutionStepValue(NODAL_MASS); added_mass += particle_mass; } } } return added_mass; } //*************************************************************************************************************** //*************************************************************************************************************** array_1d<double, 3> CalculateCenterOfMass(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); const double total_mass_inv = 1 / CalculateTotalMass(r_model_part); double cm_x = 0.0; double cm_y = 0.0; double cm_z = 0.0; #pragma omp parallel for reduction(+ : cm_x, cm_y, cm_z) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_mass = 
(it)->GetGeometry()[0].FastGetSolutionStepValue(NODAL_MASS); cm_x += particle_mass * (it)->GetGeometry()[0].Coordinates()[0]; cm_y += particle_mass * (it)->GetGeometry()[0].Coordinates()[1]; cm_z += particle_mass * (it)->GetGeometry()[0].Coordinates()[2]; } } } array_1d<double, 3> center_of_mass; center_of_mass[0] = total_mass_inv * cm_x; center_of_mass[1] = total_mass_inv * cm_y; center_of_mass[2] = total_mass_inv * cm_z; return center_of_mass; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateGravitationalPotentialEnergy(ModelPart& r_model_part, const array_1d<double, 3> reference_point) { double gravitational_energy; const double total_mass = CalculateTotalMass(r_model_part); if (total_mass == 0) gravitational_energy = 0.0; else { const array_1d<double, 3>& gravity = r_model_part.GetProcessInfo()[GRAVITY]; const array_1d<double, 3> center_of_mass = CalculateCenterOfMass(r_model_part); const array_1d<double, 3> center_of_mass_to_reference = reference_point - center_of_mass; gravitational_energy = total_mass * (center_of_mass_to_reference[0] * gravity[0] + center_of_mass_to_reference[1] * gravity[1] + center_of_mass_to_reference[2] * gravity[2]); } return gravitational_energy; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateTranslationalKinematicEnergy(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double kinematic_energy = 0.0; #pragma omp parallel for reduction(+ : kinematic_energy) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ for 
(ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_translational_kinematic_energy = 0.0; (it)->Calculate(PARTICLE_TRANSLATIONAL_KINEMATIC_ENERGY, particle_translational_kinematic_energy, r_model_part.GetProcessInfo()); kinematic_energy += particle_translational_kinematic_energy; } } } return kinematic_energy; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateRotationalKinematicEnergy(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double rotational_kinematic_energy = 0.0; #pragma omp parallel for reduction(+ : rotational_kinematic_energy) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_rotational_kinematic_energy = 0.0; (it)->Calculate(PARTICLE_ROTATIONAL_KINEMATIC_ENERGY, particle_rotational_kinematic_energy, r_model_part.GetProcessInfo()); rotational_kinematic_energy += particle_rotational_kinematic_energy; } } } return rotational_kinematic_energy; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateElasticEnergy(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double 
elastic_energy = 0.0; #pragma omp parallel for reduction(+ : elastic_energy) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_elastic_energy = 0.0; (it)->Calculate(PARTICLE_ELASTIC_ENERGY, particle_elastic_energy, r_model_part.GetProcessInfo()); elastic_energy += particle_elastic_energy; } } } return elastic_energy; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateInelasticFrictionalEnergy(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double frictional_energy = 0.0; #pragma omp parallel for reduction(+ : frictional_energy) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_frictional_energy = 0.0; (it)->Calculate(PARTICLE_INELASTIC_FRICTIONAL_ENERGY, particle_frictional_energy, r_model_part.GetProcessInfo()); frictional_energy += particle_frictional_energy; } } } return frictional_energy; } double CalculateInelasticViscodampingEnergy(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double viscodamping_energy = 0.0; #pragma omp parallel for reduction(+ : viscodamping_energy) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = 
GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_viscodamping_energy = 0.0; (it)->Calculate(PARTICLE_INELASTIC_VISCODAMPING_ENERGY, particle_viscodamping_energy, r_model_part.GetProcessInfo()); viscodamping_energy += particle_viscodamping_energy; } } } return viscodamping_energy; } //*************************************************************************************************************** //*************************************************************************************************************** array_1d<double, 3> CalculateTotalMomentum(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double m_x = 0.0; double m_y = 0.0; double m_z = 0.0; #pragma omp parallel for reduction(+ : m_x, m_y, m_z) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { array_1d<double, 3> particle_momentum; (it)->Calculate(MOMENTUM, particle_momentum, r_model_part.GetProcessInfo()); m_x += particle_momentum[0]; m_y += particle_momentum[1]; m_z += particle_momentum[2]; } } } array_1d<double, 3> momentum; momentum[0] = m_x; momentum[1] = m_y; momentum[2] = m_z; return momentum; } //*************************************************************************************************************** //*************************************************************************************************************** array_1d<double, 3> CalulateTotalAngularMomentum(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); const array_1d<double, 3> 
center_of_mass = CalculateCenterOfMass(r_model_part); double am_x = 0.0; double am_y = 0.0; double am_z = 0.0; #pragma omp parallel for reduction(+ : am_x, am_y, am_z) for (int k = 0; k < ParallelUtilities::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { array_1d<double, 3> particle_momentum; array_1d<double, 3> particle_local_angular_momentum; array_1d<double, 3> center_of_mass_to_particle = (it)->GetGeometry()[0].Coordinates() - center_of_mass; (it)->Calculate(MOMENTUM, particle_momentum, r_model_part.GetProcessInfo()); (it)->Calculate(ANGULAR_MOMENTUM, particle_local_angular_momentum, r_model_part.GetProcessInfo()); array_1d<double, 3> aux; Kratos::MathUtils<double>::CrossProduct(aux, particle_momentum, center_of_mass_to_particle); am_x += particle_local_angular_momentum[0] + aux[0]; am_y += particle_local_angular_momentum[1] + aux[1]; am_z += particle_local_angular_momentum[2] + aux[2]; } } } array_1d<double, 3> angular_momentum; angular_momentum[0] = am_x; angular_momentum[1] = am_y; angular_momentum[2] = am_z; return angular_momentum; } //*************************************************************************************************************** //*************************************************************************************************************** // Check by how much Newton's Third Law is violated array_1d<double, 3> CalculateSumOfInternalForces(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(ParallelUtilities::GetNumThreads(),r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double sum_of_contact_forces_x = 0.0; double sum_of_contact_forces_y = 0.0; double sum_of_contact_forces_z = 0.0; #pragma omp parallel for reduction(+ : sum_of_contact_forces_x, sum_of_contact_forces_y, sum_of_contact_forces_z) for (int k = 0; k < 
ParallelUtilities::GetNumThreads(); ++k){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)){ const array_1d<double, 3>& contact_force = (it)->GetGeometry()[0].FastGetSolutionStepValue(CONTACT_FORCES); sum_of_contact_forces_x += contact_force[0]; sum_of_contact_forces_y += contact_force[1]; sum_of_contact_forces_z += contact_force[2]; } } } array_1d<double, 3> sum_of_contact_forces; sum_of_contact_forces[0] = sum_of_contact_forces_x; sum_of_contact_forces[1] = sum_of_contact_forces_y; sum_of_contact_forces[2] = sum_of_contact_forces_z; return sum_of_contact_forces; } //*************************************************************************************************************** //*************************************************************************************************************** ///@} ///@name Access ///@{ array_1d<double, 3> GetInitialCenterOfMass() { return mInitialCenterOfMassAndMass; } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a stemplate<class T, std::size_t dim> tring. virtual std::string Info() const { return ""; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { } /// Print object's data. 
virtual void PrintData(std::ostream& rOStream) const { } ///@} ///@name Friends ///@{ std::vector<unsigned int>& GetElementPartition() { return (mElementsPartition); } ElementsArrayType::iterator GetElementPartitionBegin(ModelPart& r_model_part, unsigned int k) { ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements(); return (pElements.ptr_begin() + mElementsPartition[k]); } ElementsArrayType::iterator GetElementPartitionEnd(ModelPart& r_model_part, unsigned int k) { ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements(); return (pElements.ptr_begin() + mElementsPartition[k + 1]); } ///@} protected: ///@name Protected static Member r_variables ///@{ ///@} ///@name Protected member r_variables ///@{ template<class T, std::size_t dim> ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ std::vector<unsigned int> mElementsPartition; ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member r_variables ///@{ ///@} ///@name Member r_variables ///@{ array_1d<double, 3> mInitialCenterOfMassAndMass; double mInitialMass; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. SphericElementGlobalPhysicsCalculator & operator=(SphericElementGlobalPhysicsCalculator const& rOther); ///@} }; // Class SphericElementGlobalPhysicsCalculator ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ } // namespace Kratos. #endif // CALCULATE_GLOBAL_PHYSICAL_PROPERTIES_H
par_forloop.h
// One-line helper header: emits an OpenMP work-sharing directive, so the
// `for` loop that immediately follows the #include of this file gets
// parallelized. Swapping or stubbing this header toggles parallelism without
// touching the loop itself; compilers built without OpenMP simply ignore the
// pragma.
#pragma omp parallel for
heat_3d-a.pluto.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Discretized 3D heat equation stencil with non periodic boundary conditions * Adapted from Pochoir test bench */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> /* * N is the number of points * T is the number of timesteps */ #ifdef HAS_DECLS #include "decls.h" #else #define N 800L #define T 800L #endif #define NUM_FP_OPS 15 /* Define our arrays */ double total=0; double sum_err_sqr=0; int chtotal=0; /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char * argv[]) { long int t, i, j, k; const int BASE = 1024; long count=0; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0; // double A[2][N][N][N]; double ****A = (double ****)malloc(2 * sizeof (double ***)); int l; for (l = 0; l < 2; l++){ A[l] = (double ***) malloc(N * sizeof(double **)); for (i = 0; i < N; i++){ A[l][i] = (double **) malloc(N * sizeof(double *)); for (j = 0; j < N; j++) A[l][i][j] = (double *) malloc(N * sizeof (double)); } } printf("Number of points = %ld\t|Number of timesteps = %ld\t", N, T); /* Initialization */ srand(42); // seed with a constant value to verify results for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < N; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef TIME gettimeofday(&start, 0); #endif // #undef N // #define N 150L #undef T #define T 400L /* Copyright (C) 1991-2012 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. 
It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* We do support the IEC 559 math functionality, real and complex. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((N >= 1) && (T >= 1)) { for (t1=-1;t1<=T-1;t1++) { lbp=ceild(t1,2); ubp=floord(2*t1+N,4); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(0,ceild(t1-1,2));t3<=min(floord(2*T+N-2,4),floord(2*t1+N+2,4));t3++) { for (t4=max(max(0,ceild(t1-1011,1012)),ceild(4*t3-N-2022,2024));t4<=min(min(floord(2*T+N-2,2024),floord(2*t1+N+2,2024)),floord(4*t3+N+2,2024));t4++) { if ((t1 <= floord(2024*t4-N,2)) && (t2 <= 506*t4-1) && (t3 <= 506*t4-1) && (t4 >= ceild(N,2024))) { if (N%2 == 0) { for (t6=max(max(4*t2,2024*t4-N+1),-4*t1+4*t2+4048*t4-2*N-1);t6<=min(4*t2+3,-4*t1+4*t2+4048*t4-2*N+2);t6++) { for (t7=max(4*t3,2024*t4-N+1);t7<=4*t3+3;t7++) { A[0][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)] = 0.125 * ((((-2024*t4+t6+N-1)+1) >= N ? 0 : A[1][(-2024*t4+t6+N-1)+1][(-2024*t4+t7+N-1)][(N-1)]) - 2.0 * A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)] + (((-2024*t4+t6+N-1)-1) < 0 ? 0 : A[1][(-2024*t4+t6+N-1)-1][(-2024*t4+t7+N-1)][(N-1)])) + 0.125 * ((((-2024*t4+t7+N-1)+1) >= N ? 0 : A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)+1][(N-1)]) - 2.0 * A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)] + (((-2024*t4+t7+N-1)-1) < 0 ? 0 : A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 
0 : A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)+1]) - 2.0 * A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)-1])) + A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)];; } } } } if ((t1 <= floord(4*t3-N,2)) && (t2 <= t3-1) && (t3 >= ceild(N,4))) { if (N%2 == 0) { for (t6=max(max(4*t2,4*t3-N+1),-4*t1+4*t2+8*t3-2*N-1);t6<=min(4*t2+3,-4*t1+4*t2+8*t3-2*N+2);t6++) { lbv=max(2024*t4,4*t3-N+1); ubv=min(4*t3,2024*t4+2023); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)] = 0.125 * ((((-4*t3+t6+N-1)+1) >= N ? 0 : A[1][(-4*t3+t6+N-1)+1][(N-1)][(-4*t3+t8+N-1)]) - 2.0 * A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)] + (((-4*t3+t6+N-1)-1) < 0 ? 0 : A[1][(-4*t3+t6+N-1)-1][(N-1)][(-4*t3+t8+N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-4*t3+t6+N-1)][(N-1)+1][(-4*t3+t8+N-1)]) - 2.0 * A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-4*t3+t6+N-1)][(N-1)-1][(-4*t3+t8+N-1)])) + 0.125 * ((((-4*t3+t8+N-1)+1) >= N ? 0 : A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)+1]) - 2.0 * A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)] + (((-4*t3+t8+N-1)-1) < 0 ? 0 : A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)-1])) + A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)];; } } } } if ((t1 >= 0) && (2*t1 == 4*t2-N)) { for (t7=max(4*t3,2*t1+1);t7<=min(2*t1+N,4*t3+3);t7++) { lbv=max(2024*t4,2*t1+1); ubv=min(2*t1+N,2024*t4+2023); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if ((2*t1+3*N)%4 == 0) { A[0][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(N-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((N-1)-1) < 0 ? 0 : A[1][(N-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(N-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 
0 : A[1][(N-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(2*t3,T-2),1012*t4+1010)) && (2*t1 == 4*t2-N)) { for (t7=max(4*t3,2*t1+2);t7<=min(4*t3+3,2*t1+N+1);t7++) { lbv=max(2024*t4,2*t1+2); ubv=min(2024*t4+2023,2*t1+N+1); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if ((2*t1+3*N)%4 == 0) { A[1][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((N-1)+1) >= N ? 0 : A[0][(N-1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((N-1)-1) < 0 ? 0 : A[0][(N-1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(N-1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(N-1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } } if ((t1 == 2*t2) && (t1 <= min(floord(4*t3-N+3,2),floord(2024*t4-N+2023,2))) && (t1 >= max(ceild(4*t3-N+1,2),ceild(2024*t4-N+1,2)))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { lbv=max(2*t1,2024*t4); ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][0][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 
0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][0][(-2*t1+t7)][(-2*t1+t8)];; } } } for (t6=2*t1+1;t6<=min(2*t1+2,2*t1+N);t6++) { for (t7=max(4*t3,2*t1+1);t7<=2*t1+N;t7++) { lbv=max(2024*t4,2*t1+1); ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } } if ((t1 == 2*t2) && (t1 <= floord(2024*t4-N+2023,2)) && (t1 >= max(ceild(4*t3-N+4,2),ceild(2024*t4-N+1,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=max(2*t1,2024*t4); ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][0][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 
0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][0][(-2*t1+t7)][(-2*t1+t8)];; } } } for (t6=2*t1+1;t6<=2*t1+2;t6++) { for (t7=max(4*t3,2*t1+1);t7<=4*t3+3;t7++) { lbv=max(2024*t4,2*t1+1); ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } } if ((t1 == 2*t2) && (t1 <= floord(4*t3-N+3,2)) && (t1 >= max(ceild(4*t3-N+1,2),ceild(2024*t4-N+2024,2)))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { lbv=max(2*t1,2024*t4); ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][0][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 
0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][0][(-2*t1+t7)][(-2*t1+t8)];; } } } for (t6=2*t1+1;t6<=2*t1+2;t6++) { for (t7=4*t3;t7<=2*t1+N;t7++) { lbv=max(2024*t4,2*t1+1); ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } } if ((t1 == 2*t2) && (t1 >= max(ceild(4*t3-N+4,2),ceild(2024*t4-N+2024,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=max(2*t1,2024*t4); ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][0][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 
0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][0][(-2*t1+t7)][(-2*t1+t8)];; } } } for (t6=2*t1+1;t6<=2*t1+2;t6++) { for (t7=max(4*t3,2*t1+1);t7<=4*t3+3;t7++) { lbv=max(2024*t4,2*t1+1); ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } } if ((t1 <= min(floord(2024*t4-N+2023,2),2*t2-1)) && (t1 >= max(max(ceild(4*t2-N+1,2),2*t3),1012*t4))) { lbv=2*t1; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][0][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][0][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][0][(-2*t1+t8)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][0 +1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0 -1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][0][(-2*t1+t8)];; } for (t7=2*t1+1;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 
0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 
0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2*t1+1; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 
0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(floord(2024*t4-N+2023,2),2*t2-1),1012*t4-1)) && (t1 >= max(max(ceild(4*t2-N+1,2),ceild(2024*t4-N+1,2)),2*t3))) { lbv=2024*t4; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][0][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][0][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][0][(-2*t1+t8)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][0 +1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0 -1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][0][(-2*t1+t8)];; } for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 
0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 
0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= 2*t2-1) && (t1 >= max(max(max(ceild(4*t2-N+1,2),ceild(2024*t4-N+2024,2)),2*t3),1012*t4))) { lbv=2*t1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][0][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][0][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][0][(-2*t1+t8)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][0 +1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0 -1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][0][(-2*t1+t8)];; } for (t7=2*t1+1;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 
0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 
0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(2*t2-1,1012*t4-1)) && (t1 >= max(max(ceild(4*t2-N+1,2),ceild(2024*t4-N+2024,2)),2*t3))) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][0][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][0][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][0][(-2*t1+t8)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][0 +1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0 -1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][0][(-2*t1+t8)];; } for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 
0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 
0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(floord(4*t3-N+3,2),floord(2024*t4-N+2023,2)),2*t2-1)) && (t1 >= max(ceild(4*t3-N+1,2),1012*t4))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 
0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } lbv=2*t1+1; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(N-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(N-1)][(-2*t1+t8-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((N-1)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2-1)][(N-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=2*t1+N;t7++) { lbv=2*t1+1; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(floord(2024*t4-N+2023,2),2*t2-1),2*t3-1)) && (t1 >= max(max(ceild(4*t2-N+1,2),ceild(4*t3-N+4,2)),1012*t4))) { for (t7=4*t3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 
0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 
0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+1; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(floord(4*t3-N+3,2),2*t2-1)) && (t1 >= max(max(ceild(4*t3-N+1,2),ceild(2024*t4-N+2024,2)),1012*t4))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 
0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 
0 : A[1][(-2*t1+4*t2-1)+1][(N-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(N-1)][(-2*t1+t8-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=2*t1+N;t7++) { lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(2*t2-1,2*t3-1)) && (t1 >= max(max(max(ceild(4*t2-N+1,2),ceild(4*t3-N+4,2)),ceild(2024*t4-N+2024,2)),1012*t4))) { for (t7=4*t3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 
0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(min(floord(4*t3-N+3,2),floord(2024*t4-N+2023,2)),2*t2-1),1012*t4-1)) && (t1 >= max(max(0,ceild(4*t3-N+1,2)),ceild(2024*t4-N+1,2)))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { lbv=2024*t4; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 
0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } lbv=2024*t4; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(N-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(N-1)][(-2*t1+t8-1)])) + 0.125 * ((((N-1)+1) >= N ? 
0 : A[1][(-2*t1+4*t2-1)][(N-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=2*t1+N;t7++) { lbv=2024*t4; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(min(floord(2024*t4-N+2023,2),2*t2-1),2*t3-1),1012*t4-1)) && (t1 >= max(max(max(0,ceild(4*t2-N+1,2)),ceild(4*t3-N+4,2)),ceild(2024*t4-N+1,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 
0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(floord(4*t3-N+3,2),2*t2-1),1012*t4-1)) && (t1 >= max(max(0,ceild(4*t3-N+1,2)),ceild(2024*t4-N+2024,2)))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 
0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(N-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(N-1)][(-2*t1+t8-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=2*t1+N;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 
0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(2*t2-1,2*t3-1),1012*t4-1)) && (t1 >= max(max(max(0,ceild(4*t2-N+1,2)),ceild(4*t3-N+4,2)),ceild(2024*t4-N+2024,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((N >= 3) && (t1 <= min(min(2*t3,T-2),1012*t4+1010)) && (2*t1 == 4*t2-N+1)) { for (t6=2*t1+N;t6<=2*t1+N+1;t6++) { for (t7=max(4*t3,2*t1+2);t7<=min(4*t3+3,2*t1+N+1);t7++) { lbv=max(2024*t4,2*t1+2); ubv=min(2024*t4+2023,2*t1+N+1); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if ((2*t1+3*N+1)%4 == 0) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 
0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } } for (t7=max(4*t3,2*t1+3);t7<=4*t3+3;t7++) { lbv=max(2024*t4,2*t1+3); ubv=min(2024*t4+2023,2*t1+N+2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if ((2*t1+3*N+1)%4 == 0) { A[0][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(N-1)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][(N-1)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(N-1)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(N-1)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } } if ((t1 <= min(min(min(floord(2024*t4-N+2021,2),2*t3),T-2),2*t2-1)) && (t1 >= max(max(ceild(4*t2-N+2,2),2*t3-1),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=2*t1+2;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 
0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][0][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)];; } for (t7=2*t1+3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 
0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 
0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } } if ((t1 <= min(min(min(min(floord(2024*t4-N+2021,2),2*t3),T-2),2*t2-1),1012*t4-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(2024*t4-N-1,2)),2*t3-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=2*t1+2;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][0][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 
0 : A[0][(-2*t1+4*t2+1)-1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)];; } for (t7=2*t1+3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 
0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } } if ((t1 <= min(min(min(floord(4*t3-N+1,2),floord(2024*t4-N+2021,2)),T-2),2*t2-1)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N-1,2)),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 
0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } lbv=2*t1+3; ubv=2*t1+N+2; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)];; } } if ((t1 <= min(min(min(min(floord(4*t3-N+1,2),floord(2024*t4-N+2021,2)),T-2),2*t2-1),1012*t4-2)) && (t1 >= max(ceild(4*t2-N+2,2),ceild(4*t3-N-1,2)))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 
0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } lbv=2024*t4; ubv=2*t1+N+2; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 
0 : A[1][(-2*t1+4*t2)+1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)];; } } if ((t1 <= min(min(min(floord(2024*t4-N+2021,2),T-2),2*t2-1),2*t3-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+2,2)),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 
0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } } if ((t1 <= min(min(min(min(floord(2024*t4-N+2021,2),T-2),2*t2-1),2*t3-2),1012*t4-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+2,2)),ceild(2024*t4-N-1,2)))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 
0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 
0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } } if ((t1 <= min(min(min(2*t3,T-2),2*t2-1),1012*t4+1010)) && (t1 >= max(max(max(ceild(4*t2-N+2,2),ceild(2024*t4-N+2022,2)),2*t3-1),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=2*t1+2;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][0][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 
0 : A[0][(-2*t1+4*t2+1)-1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)];; } for (t7=2*t1+3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 
0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } if ((t1 <= min(min(min(2*t3,T-2),2*t2-1),1012*t4-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(2024*t4-N+2022,2)),2*t3-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=2*t1+2;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 
0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][0][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)];; } for (t7=2*t1+3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 
0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } if ((t1 <= min(min(min(floord(4*t3-N+1,2),T-2),2*t2-1),1012*t4+1010)) && (t1 >= max(max(max(ceild(4*t2-N+2,2),ceild(4*t3-N-1,2)),ceild(2024*t4-N+2022,2)),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 
0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)];; } } if ((t1 <= min(min(min(floord(4*t3-N+1,2),T-2),2*t2-1),1012*t4-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N-1,2)),ceild(2024*t4-N+2022,2)))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 
0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)];; } } if ((t1 <= min(min(min(T-2,2*t2-1),2*t3-2),1012*t4+1010)) && (t1 >= max(max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+2,2)),ceild(2024*t4-N+2022,2)),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 
0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } if ((t1 <= min(min(min(T-2,2*t2-1),2*t3-2),1012*t4-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+2,2)),ceild(2024*t4-N+2022,2)))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 
0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } if ((N == 1) && (t1 == 2*t2) && (t1 == 2*t3) && (t1 <= T-2)) { if (t1%2 == 0) { A[1][0][0][0] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][0][0]) - 2.0 * A[0][0][0][0] + ((0 -1) < 0 ? 0 : A[0][0 -1][0][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][0][0 +1][0]) - 2.0 * A[0][0][0][0] + ((0 -1) < 0 ? 0 : A[0][0][0 -1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][0][0][0 +1]) - 2.0 * A[0][0][0][0] + ((0 -1) < 0 ? 0 : A[0][0][0][0 -1])) + A[0][0][0][0];; } if (t1%2 == 0) { A[0][0][0][0] = 0.125 * (((0 +1) >= N ? 
0 : A[1][0 +1][0][0]) - 2.0 * A[1][0][0][0] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][0])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][0]) - 2.0 * A[1][0][0][0] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0][0 +1]) - 2.0 * A[1][0][0][0] + ((0 -1) < 0 ? 0 : A[1][0][0][0 -1])) + A[1][0][0][0];; } } if ((N >= 2) && (t1 == 2*t2) && (t1 == 2*t3) && (t1 <= min(floord(2024*t4-N+2021,2),T-2)) && (t1 >= 1012*t4)) { for (t7=2*t1+2;t7<=2*t1+3;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][0][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][0][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][1][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][0][(-2*t1+t8-2)-1])) + A[0][1][0][(-2*t1+t8-2)];; } } if (t1%2 == 0) { A[1][1][1][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][0]) - 2.0 * A[0][1][1][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][0])) + 0.125 * (((1 +1) >= N ? 
0 : A[0][1][1 +1][0]) - 2.0 * A[0][1][1][0] + ((1 -1) < 0 ? 0 : A[0][1][1 -1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][1][0 +1]) - 2.0 * A[0][1][1][0] + ((0 -1) < 0 ? 0 : A[0][1][1][0 -1])) + A[0][1][1][0];; } lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][1][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][(-2*t1+t8-2)])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1][1 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][1][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][1][(-2*t1+t8-2)-1])) + A[0][1][1][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][0][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(-2*t1+t8-3)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][0][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][0][(-2*t1+t8-3)-1])) + A[1][0][0][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][0][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(N-1)]) - 2.0 * A[1][0][0][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(N-1)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(N-1)]) - 2.0 * A[1][0][0][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][0][(N-1)+1]) - 2.0 * A[1][0][0][(N-1)] + (((N-1)-1) < 0 ? 
0 : A[1][0][0][(N-1)-1])) + A[1][0][0][(N-1)];; } } if ((t1 == 2*t2) && (t1 == 2*t3) && (t1 <= min(min(floord(2024*t4-N+2021,2),T-2),1012*t4-2)) && (t1 >= ceild(2024*t4-N-1,2))) { for (t7=2*t1+2;t7<=2*t1+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][0][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][0][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][1][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][0][(-2*t1+t8-2)-1])) + A[0][1][0][(-2*t1+t8-2)];; } } lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][1][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][(-2*t1+t8-2)])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 
0 : A[0][1][1 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][1][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][1][(-2*t1+t8-2)-1])) + A[0][1][1][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][0][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(-2*t1+t8-3)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][0][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][0][(-2*t1+t8-3)-1])) + A[1][0][0][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][0][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(N-1)]) - 2.0 * A[1][0][0][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(N-1)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(N-1)]) - 2.0 * A[1][0][0][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][0][(N-1)+1]) - 2.0 * A[1][0][0][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][0][(N-1)-1])) + A[1][0][0][(N-1)];; } } if ((t1 == 2*t2) && (t1 <= min(min(min(floord(4*t3-N+1,2),floord(2024*t4-N+2021,2)),T-2),2*t3-2)) && (t1 >= max(ceild(4*t3-N-1,2),1012*t4))) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 
0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][0 -1])) + A[0][1][(-2*t1+t7-2)][0];; } lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 
0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)-1])) + A[1][0][(-2*t1+t7-3)][(N-1)];; } } lbv=2*t1+3; ubv=2*t1+N+2; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][0][(N-1)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][0][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)-1])) + A[1][0][(N-1)][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 <= min(min(min(floord(4*t3-N+1,2),floord(2024*t4-N+2021,2)),T-2),1012*t4-2)) && (t1 >= ceild(4*t3-N-1,2))) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 
0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + ((0 -1) < 0 ? 
0 : A[1][0 -1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)-1])) + A[1][0][(-2*t1+t7-3)][(N-1)];; } } lbv=2024*t4; ubv=2*t1+N+2; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][0][(N-1)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][0][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)-1])) + A[1][0][(N-1)][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 <= min(min(floord(2024*t4-N+2021,2),T-2),2*t3-2)) && (t1 >= max(ceild(4*t3-N+2,2),1012*t4))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 
0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][0 -1])) + A[0][1][(-2*t1+t7-2)][0];; } lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)-1])) + A[1][0][(-2*t1+t7-3)][(N-1)];; } } } if ((t1 == 2*t2) && (t1 <= min(min(min(floord(2024*t4-N+2021,2),T-2),2*t3-2),1012*t4-2)) && (t1 >= max(ceild(4*t3-N+2,2),ceild(2024*t4-N-1,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 
0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)-1])) + A[1][0][(-2*t1+t7-3)][(N-1)];; } } } if ((t1 == 2*t2) && (t1 == 2*t3) && (t1 <= T-2) && (t1 >= max(ceild(2024*t4-N+2022,2),1012*t4))) { for (t7=2*t1+2;t7<=2*t1+3;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 
0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][0][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][0][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][1][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][0][(-2*t1+t8-2)-1])) + A[0][1][0][(-2*t1+t8-2)];; } } if (t1%2 == 0) { A[1][1][1][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][0]) - 2.0 * A[0][1][1][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][0])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][0]) - 2.0 * A[0][1][1][0] + ((1 -1) < 0 ? 0 : A[0][1][1 -1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][1][0 +1]) - 2.0 * A[0][1][1][0] + ((0 -1) < 0 ? 0 : A[0][1][1][0 -1])) + A[0][1][1][0];; } lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][1][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][(-2*t1+t8-2)])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 
0 : A[0][1][1 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][1][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][1][(-2*t1+t8-2)-1])) + A[0][1][1][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][0][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(-2*t1+t8-3)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][0][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][0][(-2*t1+t8-3)-1])) + A[1][0][0][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 == 2*t3) && (t1 <= min(T-2,1012*t4-2)) && (t1 >= ceild(2024*t4-N+2022,2))) { for (t7=2*t1+2;t7<=2*t1+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][0][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][0][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 
0 : A[0][1][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][1][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][0][(-2*t1+t8-2)-1])) + A[0][1][0][(-2*t1+t8-2)];; } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][1][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][(-2*t1+t8-2)])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1][1 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][1][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][1][(-2*t1+t8-2)-1])) + A[0][1][1][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][0][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(-2*t1+t8-3)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][0][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][0][(-2*t1+t8-3)-1])) + A[1][0][0][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 <= min(floord(4*t3-N+1,2),T-2)) && (t1 >= max(max(ceild(4*t3-N-1,2),ceild(2024*t4-N+2022,2)),1012*t4))) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 
0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][0 -1])) + A[0][1][(-2*t1+t7-2)][0];; } lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 
0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][0][(N-1)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][0][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)-1])) + A[1][0][(N-1)][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 <= min(min(floord(4*t3-N+1,2),T-2),1012*t4-2)) && (t1 >= max(ceild(4*t3-N-1,2),ceild(2024*t4-N+2022,2)))) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 
0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][0][(N-1)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 
0 : A[1][0][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)-1])) + A[1][0][(N-1)][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 <= min(T-2,2*t3-2)) && (t1 >= max(max(ceild(4*t3-N+2,2),ceild(2024*t4-N+2022,2)),1012*t4))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][0 -1])) + A[0][1][(-2*t1+t7-2)][0];; } lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 
0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } } if ((t1 == 2*t2) && (t1 <= min(min(T-2,2*t3-2),1012*t4-2)) && (t1 >= max(ceild(4*t3-N+2,2),ceild(2024*t4-N+2022,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 
0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } } } } } } } /* End of CLooG code */ // #undef N // #define N 300L #undef T #define T 800L #ifdef TIME gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); printf("|Time taken: %7.5lfs\t", tdiff); printf("|MFLOPS: %f\n", ((((double)NUM_FP_OPS * N *N * N * (T-1)) / tdiff) / 1000000L)); #endif #ifdef VERIFY for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < N; k++) { total+= A[T%2][i][j][k] ; } } } printf("|sum: %e\t", total); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < N; k++) { sum_err_sqr += (A[T%2][i][j][k] - (total/N))*(A[T%2][i][j][k] - (total/N)); } } } printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr)); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < N; k++) { chtotal += ((char *)A[T%2][i][j])[k]; } } } printf("|sum(rep(A)) = %d\n", chtotal); #endif for (l = 0; l < 2; l++){ for (i = 0; i < N; i++){ for (j = 0; j < N; j++) free(A[l][i][j]); // = (double *) malloc(N * sizeof (double)); free(A[l][i]); // = (double **) malloc(N * sizeof(double *)); } free(A[l]); // = (double ***) malloc(N * sizeof(double **)); } return 0; } // icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm // /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/ // /* @ begin PrimeRegTile (scalar_replacement=0; T1t5=4; T1t6=4; T1t7=4; T1t8=4; ) @*/ // /* @ end @*/
rwpng.c
/*
** PNG read/write functions
**
** © 1998-2000 by Greg Roelofs.
** © 2009-2017 by Kornel Lesiński.
**
** See COPYRIGHT file for license.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#include "png.h" /* if this include fails, you need to install libpng (e.g. libpng-devel package) and run ./configure */
#include "rwpng.h"
#if USE_LCMS
#include "lcms2.h"
#endif

/* zlib compression-level fallbacks in case zlib.h constants are not visible here */
#ifndef Z_BEST_COMPRESSION
#define Z_BEST_COMPRESSION 9
#endif
#ifndef Z_BEST_SPEED
#define Z_BEST_SPEED 1
#endif

#ifdef _OPENMP
#include <omp.h>
#else
/* without OpenMP pretend there is a single thread (used for the LCMS cache decision) */
#define omp_get_max_threads() 1
#endif

#if PNG_LIBPNG_VER < 10500
typedef png_const_charp png_const_bytep;
#endif

static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg);
/* implemented in the Cocoa build only; reads an image via the OS decoder */
pngquant_error rwpng_read_image32_cocoa(FILE *infile, uint32_t *width, uint32_t *height, size_t *file_size, rwpng_rgba **image_data);

/*
 * Print the libpng version and which color-profile backend (Cocoa, Little CMS,
 * or none) this binary was compiled with, plus warnings for known-bad old
 * libpng releases (version compared as a string prefix).
 */
void rwpng_version_info(FILE *fp)
{
    const char *pngver = png_get_header_ver(NULL);

#if USE_COCOA
    fprintf(fp, " Color profiles are supported via Cocoa. Using libpng %s.\n", pngver);
#elif USE_LCMS
    fprintf(fp, " Color profiles are supported via Little CMS. Using libpng %s.\n", pngver);
#else
    fprintf(fp, " Compiled with no support for color profiles. Using libpng %s.\n", pngver);
#endif

#if PNG_LIBPNG_VER < 10600
    if (strcmp(pngver, "1.3.") < 0) {
        fputs("\nWARNING: Your version of libpng is outdated and may produce corrupted files.\n"
              "Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
    } else if (strcmp(pngver, "1.6.") < 0) {
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
        fputs("\nWARNING: Your version of libpng is old and has buggy support for custom chunks.\n"
              "Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
#endif
    }
#endif
}

/* io_ptr payload for the libpng read callback */
struct rwpng_read_data {
    FILE *const fp;
    png_size_t bytes_read;  /* running total; reported as the input file size */
};

#if !USE_COCOA
/* libpng read callback: pull bytes from the FILE* and count them.
 * A zero-length fread is reported to libpng as a fatal read error. */
static void user_read_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
    struct rwpng_read_data *read_data = (struct rwpng_read_data *)png_get_io_ptr(png_ptr);

    png_size_t read = fread(data, 1, length, read_data->fp);
    if (!read) {
        png_error(png_ptr, "Read error");
    }
    read_data->bytes_read += read;
}
#endif

/* io_ptr payload for the libpng write callback */
struct rwpng_write_state {
    FILE *outfile;
    png_size_t maximum_file_size;  /* 0 = unlimited; checked after writing */
    png_size_t bytes_written;
    pngquant_error retval;         /* latches the first write failure */
};

/* libpng write callback: once a write has failed, all further writes are
 * silently skipped so only the first error is reported. */
static void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
    struct rwpng_write_state *write_state = (struct rwpng_write_state *)png_get_io_ptr(png_ptr);

    if (SUCCESS != write_state->retval) {
        return;
    }

    if (!fwrite(data, length, 1, write_state->outfile)) {
        write_state->retval = CANT_WRITE_ERROR;
    }

    write_state->bytes_written += length;
}

static void user_flush_data(png_structp png_ptr)
{
    // libpng never calls this :(
}

/* Build an array of per-row pointers into the contiguous buffer `base`.
 * rowbytes == 0 means "ask libpng for the row stride".
 * Returns NULL on allocation failure; caller owns the returned array. */
static png_bytepp rwpng_create_row_pointers(png_infop info_ptr, png_structp png_ptr, unsigned char *base, unsigned int height, png_size_t rowbytes)
{
    if (!rowbytes) {
        rowbytes = png_get_rowbytes(png_ptr, info_ptr);
    }

    png_bytepp row_pointers = malloc(height * sizeof(row_pointers[0]));
    if (!row_pointers) return NULL;
    for(size_t row = 0; row < height; row++) {
        row_pointers[row] = base + row * rowbytes;
    }
    return row_pointers;
}

#if !USE_COCOA
static int
read_chunk_callback(png_structp png_ptr, png_unknown_chunkp in_chunk)
{
    /* Collects unknown/ancillary chunks on a linked list (user chunk ptr)
     * so they can be copied into the output file later.
     * Color-management chunks are returned to libpng (return 0) because
     * they are parsed/replaced by the color-profile code instead.
     * NOTE(review): the two mallocs below are not checked for NULL — on
     * OOM this would memcpy through a null pointer; TODO confirm whether
     * that is considered acceptable here. */
    if (0 == memcmp("iCCP", in_chunk->name, 5) ||
        0 == memcmp("cHRM", in_chunk->name, 5) ||
        0 == memcmp("gAMA", in_chunk->name, 5)) {
        return 0; // not handled
    }
    if (in_chunk->location == 0) {
        return 1; // ignore chunks with invalid location
    }

    struct rwpng_chunk **head = (struct rwpng_chunk **)png_get_user_chunk_ptr(png_ptr);

    struct rwpng_chunk *chunk = malloc(sizeof(struct rwpng_chunk));
    memcpy(chunk->name, in_chunk->name, 5);
    chunk->size = in_chunk->size;
    chunk->location = in_chunk->location;
    chunk->data = in_chunk->size ? malloc(in_chunk->size) : NULL;
    if (in_chunk->size) {
        memcpy(chunk->data, in_chunk->data, in_chunk->size);
    }

    /* prepend to the list (order is reversed relative to the file) */
    chunk->next = *head;
    *head = chunk;

    return 1; // marks as "handled", libpng won't store it
}
#endif

/*
   retval:
     0 = success
    21 = bad sig
    22 = bad IHDR
    24 = insufficient memory
    25 = libpng error (via longjmp())
    26 = wrong PNG color type (no alpha channel)
 */

#if !USE_COCOA
static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg) {
    fprintf(stderr, " libpng warning: %s\n", msg);
}
static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg) {
}

/*
 * Decode a PNG from `infile` into mainprog_ptr as 8-bit RGBA.
 * Registers libpng transforms so that any input color type/depth ends up as
 * RGBA8; optionally (USE_LCMS) converts embedded ICC / cHRM+gAMA colorspaces
 * to sRGB in place. `strip` skips ancillary-chunk preservation; `verbose`
 * routes libpng warnings to stderr. Returns SUCCESS or a pngquant_error.
 * Note: libpng errors recover via longjmp into mainprog_ptr->jmpbuf.
 */
static pngquant_error rwpng_read_image24_libpng(FILE *infile, png24_image *mainprog_ptr, int strip, int verbose)
{
    png_structp png_ptr = NULL;
    png_infop info_ptr = NULL;
    png_size_t rowbytes;
    int color_type, bit_depth;

    png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr,
                                     rwpng_error_handler,
                                     verbose ? rwpng_warning_stderr_handler : rwpng_warning_silent_handler);
    if (!png_ptr) {
        return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
    }

    info_ptr = png_create_info_struct(png_ptr);
    if (!info_ptr) {
        png_destroy_read_struct(&png_ptr, NULL, NULL);
        return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
    }

    /* setjmp() must be called in every function that calls a non-trivial
     * libpng function */
    if (setjmp(mainprog_ptr->jmpbuf)) {
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        return LIBPNG_FATAL_ERROR; /* fatal libpng error (via longjmp()) */
    }

#if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED)
    png_set_option(png_ptr, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON);
#endif

#if PNG_LIBPNG_VER >= 10500 && defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
    if (!strip) {
        /* copy standard chunks too */
        png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_IF_SAFE, (png_const_bytep)"pHYs\0iTXt\0tEXt\0zTXt", 4);
    }
#endif
    if (!strip) {
        png_set_read_user_chunk_fn(png_ptr, &mainprog_ptr->chunks, read_chunk_callback);
    }

    struct rwpng_read_data read_data = {infile, 0};
    png_set_read_fn(png_ptr, &read_data, user_read_data);

    png_read_info(png_ptr, info_ptr); /* read all PNG info up to image data */

    /* alternatively, could make separate calls to png_get_image_width(),
     * etc., but want bit_depth and color_type for later [don't care about
     * compression_type and filter_type => NULLs] */
    png_get_IHDR(png_ptr, info_ptr, &mainprog_ptr->width, &mainprog_ptr->height, &bit_depth, &color_type, NULL, NULL, NULL);

    /* expand palette images to RGB, low-bit-depth grayscale images to 8 bits,
     * transparency chunks to full alpha channel; strip 16-bit-per-sample
     * images to 8 bits per sample; and convert grayscale to RGB[A] */

    /* GRR TO DO: preserve all safe-to-copy ancillary PNG chunks */

    if (!(color_type & PNG_COLOR_MASK_ALPHA)) {
#ifdef PNG_READ_FILLER_SUPPORTED
        /* add an opaque alpha channel so every pixel is 4 bytes */
        png_set_expand(png_ptr);
        png_set_filler(png_ptr, 65535L, PNG_FILLER_AFTER);
#else
        fprintf(stderr, "pngquant readpng: image is neither RGBA nor GA\n");
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        mainprog_ptr->retval = WRONG_INPUT_COLOR_TYPE;
        return mainprog_ptr->retval;
#endif
    }

    if (bit_depth == 16) {
        png_set_strip_16(png_ptr);
    }

    if (!(color_type & PNG_COLOR_MASK_COLOR)) {
        png_set_gray_to_rgb(png_ptr);
    }

    /* get source gamma for gamma correction, or use sRGB default */
    double gamma = 0.45455;
    if (png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) {
        mainprog_ptr->input_color = RWPNG_SRGB;
        mainprog_ptr->output_color = RWPNG_SRGB;
    } else {
        png_get_gAMA(png_ptr, info_ptr, &gamma);
        if (gamma > 0 && gamma <= 1.0) {
            mainprog_ptr->input_color = RWPNG_GAMA_ONLY;
            mainprog_ptr->output_color = RWPNG_GAMA_ONLY;
        } else {
            fprintf(stderr, "pngquant readpng: ignored out-of-range gamma %f\n", gamma);
            mainprog_ptr->input_color = RWPNG_NONE;
            mainprog_ptr->output_color = RWPNG_NONE;
            gamma = 0.45455;  /* fall back to the sRGB-ish default */
        }
    }
    mainprog_ptr->gamma = gamma;

    png_set_interlace_handling(png_ptr);

    /* all transformations have been registered; now update info_ptr data,
     * get rowbytes and channels, and allocate image memory */

    png_read_update_info(png_ptr, info_ptr);

    rowbytes = png_get_rowbytes(png_ptr, info_ptr);

    // For overflow safety reject images that won't fit in 32-bit
    if (rowbytes > INT_MAX/mainprog_ptr->height) {
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        return PNG_OUT_OF_MEMORY_ERROR;
    }

    if ((mainprog_ptr->rgba_data = malloc(rowbytes * mainprog_ptr->height)) == NULL) {
        fprintf(stderr, "pngquant readpng: unable to allocate image data\n");
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        return PNG_OUT_OF_MEMORY_ERROR;
    }

    png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);

    /* now we can go ahead and just read the whole image */
    png_read_image(png_ptr, row_pointers);

    /* and we're done! (png_read_end() can be omitted if no processing of
     * post-IDAT text/time/etc. is desired) */
    png_read_end(png_ptr, NULL);

#if USE_LCMS
#if PNG_LIBPNG_VER < 10500
    png_charp ProfileData;
#else
    png_bytep ProfileData;
#endif
    png_uint_32 ProfileLen;

    cmsHPROFILE hInProfile = NULL;

    /* color_type is read from the image before conversion to RGBA */
    int COLOR_PNG = color_type & PNG_COLOR_MASK_COLOR;

    /* embedded ICC profile */
    if (png_get_iCCP(png_ptr, info_ptr, &(png_charp){0}, &(int){0}, &ProfileData, &ProfileLen)) {

        hInProfile = cmsOpenProfileFromMem(ProfileData, ProfileLen);
        cmsColorSpaceSignature colorspace = cmsGetColorSpace(hInProfile);

        /* only RGB (and GRAY) valid for PNGs */
        if (colorspace == cmsSigRgbData && COLOR_PNG) {
            mainprog_ptr->input_color = RWPNG_ICCP;
            mainprog_ptr->output_color = RWPNG_SRGB;
        } else {
            if (colorspace == cmsSigGrayData && !COLOR_PNG) {
                mainprog_ptr->input_color = RWPNG_ICCP_WARN_GRAY;
                mainprog_ptr->output_color = RWPNG_SRGB;
            }
            cmsCloseProfile(hInProfile);
            hInProfile = NULL;  /* profile rejected; maybe synthesized below */
        }
    }

    /* build RGB profile from cHRM and gAMA */
    if (hInProfile == NULL && COLOR_PNG &&
        !png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB) &&
        png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) &&
        png_get_valid(png_ptr, info_ptr, PNG_INFO_cHRM)) {

        cmsCIExyY WhitePoint;
        cmsCIExyYTRIPLE Primaries;

        png_get_cHRM(png_ptr, info_ptr, &WhitePoint.x, &WhitePoint.y,
                     &Primaries.Red.x, &Primaries.Red.y,
                     &Primaries.Green.x, &Primaries.Green.y,
                     &Primaries.Blue.x, &Primaries.Blue.y);

        WhitePoint.Y = Primaries.Red.Y = Primaries.Green.Y = Primaries.Blue.Y = 1.0;

        cmsToneCurve *GammaTable[3];
        GammaTable[0] = GammaTable[1] = GammaTable[2] = cmsBuildGamma(NULL, 1/gamma);

        hInProfile = cmsCreateRGBProfile(&WhitePoint, &Primaries, GammaTable);

        cmsFreeToneCurve(GammaTable[0]);

        mainprog_ptr->input_color = RWPNG_GAMA_CHRM;
        mainprog_ptr->output_color = RWPNG_SRGB;
    }

    /* transform image to sRGB colorspace */
    if (hInProfile != NULL) {

        cmsHPROFILE hOutProfile = cmsCreate_sRGBProfile();
        /* the LCMS per-transform cache is not thread-safe; disable it when
         * the OpenMP loop below may run with more than one thread */
        cmsHTRANSFORM hTransform = cmsCreateTransform(hInProfile, TYPE_RGBA_8,
                                                      hOutProfile, TYPE_RGBA_8,
                                                      INTENT_PERCEPTUAL,
                                                      omp_get_max_threads() > 1 ? cmsFLAGS_NOCACHE : 0);

        #pragma omp parallel for \
            if (mainprog_ptr->height*mainprog_ptr->width > 8000) \
            schedule(static)
        for (unsigned int i = 0; i < mainprog_ptr->height; i++) {
            /* It is safe to use the same block for input and output,
               when both are of the same TYPE. */
            cmsDoTransform(hTransform, row_pointers[i],
                           row_pointers[i],
                           mainprog_ptr->width);
        }

        cmsDeleteTransform(hTransform);
        cmsCloseProfile(hOutProfile);
        cmsCloseProfile(hInProfile);

        /* pixels are sRGB now; record the matching gamma */
        mainprog_ptr->gamma = 0.45455;
    }
#endif

    png_destroy_read_struct(&png_ptr, &info_ptr, NULL);

    mainprog_ptr->file_size = read_data.bytes_read;
    mainprog_ptr->row_pointers = (unsigned char **)row_pointers;

    return SUCCESS;
}
#endif

/* Free the whole chunk list. Recursive; depth equals list length —
 * assumed small (one node per ancillary chunk). */
static void rwpng_free_chunks(struct rwpng_chunk *chunk) {
    if (!chunk) return;
    rwpng_free_chunks(chunk->next);
    free(chunk->data);
    free(chunk);
}

/* Release all buffers owned by a decoded RGBA image; fields are NULLed so a
 * double free is harmless. */
void rwpng_free_image24(png24_image *image)
{
    free(image->row_pointers);
    image->row_pointers = NULL;

    free(image->rgba_data);
    image->rgba_data = NULL;

    rwpng_free_chunks(image->chunks);
    image->chunks = NULL;
}

/* Same as above for the palette-indexed image type. */
void rwpng_free_image8(png8_image *image)
{
    free(image->indexed_data);
    image->indexed_data = NULL;

    free(image->row_pointers);
    image->row_pointers = NULL;

    rwpng_free_chunks(image->chunks);
    image->chunks = NULL;
}

/* Public entry point: decode a PNG to RGBA8, via Cocoa when built for it,
 * otherwise via the libpng path above. */
pngquant_error rwpng_read_image24(FILE *infile, png24_image *out, int strip, int verbose)
{
#if USE_COCOA
    rwpng_rgba *pixel_data;
    pngquant_error res = rwpng_read_image32_cocoa(infile, &out->width, &out->height, &out->file_size, &pixel_data);
    if (res != SUCCESS) {
        return res;
    }
    out->gamma = 0.45455;
    out->input_color = RWPNG_COCOA;
    out->output_color = RWPNG_SRGB;
    out->rgba_data = (unsigned char *)pixel_data;
    out->row_pointers = malloc(sizeof(out->row_pointers[0])*out->height);
    for(int i=0; i < out->height; i++) {
        out->row_pointers[i] = (unsigned char *)&pixel_data[out->width*i];
    }
    return SUCCESS;
#else
    return rwpng_read_image24_libpng(infile, out, strip, verbose);
#endif
}

static pngquant_error
rwpng_write_image_init(rwpng_png_image *mainprog_ptr, png_structpp png_ptr_p, png_infopp info_ptr_p, int fast_compression)
{
    /* Common setup for both writers below: allocates the libpng write/info
     * structs, installs the longjmp-based error recovery, and picks the zlib
     * compression level from the speed/size trade-off flag.
     * Returns SUCCESS or LIBPNG_INIT_ERROR (structs already destroyed). */

    /* could also replace libpng warning-handler (final NULL), but no need: */
    *png_ptr_p = png_create_write_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, NULL);

    if (!(*png_ptr_p)) {
        return LIBPNG_INIT_ERROR; /* out of memory */
    }

    *info_ptr_p = png_create_info_struct(*png_ptr_p);
    if (!(*info_ptr_p)) {
        png_destroy_write_struct(png_ptr_p, NULL);
        return LIBPNG_INIT_ERROR; /* out of memory */
    }

    /* setjmp() must be called in every function that calls a PNG-writing
     * libpng function, unless an alternate error handler was installed--
     * but compatible error handlers must either use longjmp() themselves
     * (as in this program) or exit immediately, so here we go: */
    if (setjmp(mainprog_ptr->jmpbuf)) {
        png_destroy_write_struct(png_ptr_p, info_ptr_p);
        return LIBPNG_INIT_ERROR; /* libpng error (via longjmp()) */
    }

    png_set_compression_level(*png_ptr_p, fast_compression ? Z_BEST_SPEED : Z_BEST_COMPRESSION);
    png_set_compression_mem_level(*png_ptr_p, fast_compression ? 9 : 5); // judging by optipng results, smaller mem makes libpng compress slightly better

    return SUCCESS;
}

/* Write header info + image rows + trailer, then tear down the write structs.
 * Shared tail of both writers below. */
static void rwpng_write_end(png_infopp info_ptr_p, png_structpp png_ptr_p, png_bytepp row_pointers)
{
    png_write_info(*png_ptr_p, *info_ptr_p);

    png_set_packing(*png_ptr_p);

    png_write_image(*png_ptr_p, row_pointers);

    png_write_end(*png_ptr_p, NULL);

    png_destroy_write_struct(png_ptr_p, info_ptr_p);
}

/* Emit gAMA and/or sRGB chunks matching how the input's color was classified. */
static void rwpng_set_gamma(png_infop info_ptr, png_structp png_ptr, double gamma, rwpng_color_transform color)
{
    if (color != RWPNG_GAMA_ONLY && color != RWPNG_NONE) {
        png_set_gAMA(png_ptr, info_ptr, gamma);
    }
    if (color == RWPNG_SRGB) {
        png_set_sRGB(png_ptr, info_ptr, 0); // 0 = Perceptual
    }
}

/*
 * Write a palette (indexed) PNG. Chooses the smallest legal bit depth for the
 * palette size, replays preserved ancillary chunks, writes PLTE/tRNS, and
 * enforces the optional maximum_file_size limit after writing.
 */
pngquant_error rwpng_write_image8(FILE *outfile, png8_image *mainprog_ptr)
{
    png_structp png_ptr;
    png_infop info_ptr;

    if (mainprog_ptr->num_palette > 256) return INVALID_ARGUMENT;

    pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, mainprog_ptr->fast_compression);
    if (retval) return retval;

    struct rwpng_write_state write_state;
    write_state = (struct rwpng_write_state){
        .outfile = outfile,
        .maximum_file_size = mainprog_ptr->maximum_file_size,
        .retval = SUCCESS,
    };
    png_set_write_fn(png_ptr, &write_state, user_write_data, user_flush_data);

    // Palette images generally don't gain anything from filtering
    png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_VALUE_NONE);

    rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma, mainprog_ptr->output_color);

    /* set the image parameters appropriately */
    int sample_depth;
#if PNG_LIBPNG_VER > 10400 /* old libpng corrupts files with low depth */
    if (mainprog_ptr->num_palette <= 2)
        sample_depth = 1;
    else if (mainprog_ptr->num_palette <= 4)
        sample_depth = 2;
    else if (mainprog_ptr->num_palette <= 16)
        sample_depth = 4;
    else
#endif
        sample_depth = 8;

    /* replay ancillary chunks saved while reading; also tally their size
     * (payload + 12 bytes of per-chunk overhead) for reporting */
    struct rwpng_chunk *chunk = mainprog_ptr->chunks;
    mainprog_ptr->metadata_size = 0;
    int chunk_num=0;
    while(chunk) {
        png_unknown_chunk pngchunk = {
.size = chunk->size,
            .data = chunk->data,
            .location = chunk->location,
        };
        memcpy(pngchunk.name, chunk->name, 5);
        png_set_unknown_chunks(png_ptr, info_ptr, &pngchunk, 1);
#if defined(PNG_HAVE_IHDR) && PNG_LIBPNG_VER < 10600
        png_set_unknown_chunk_location(png_ptr, info_ptr, chunk_num, pngchunk.location ? pngchunk.location : PNG_HAVE_IHDR);
#endif
        mainprog_ptr->metadata_size += chunk->size + 12;
        chunk = chunk->next;
        chunk_num++;
    }

    png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
                 sample_depth, PNG_COLOR_TYPE_PALETTE,
                 0, PNG_COMPRESSION_TYPE_DEFAULT,
                 PNG_FILTER_TYPE_BASE);

    /* split RGBA palette entries into libpng's separate PLTE colors and tRNS
     * alpha array; tRNS only needs entries up to the last non-opaque one */
    png_color palette[256];
    png_byte trans[256];
    unsigned int num_trans = 0;
    for(unsigned int i = 0; i < mainprog_ptr->num_palette; i++) {
        palette[i] = (png_color){
            .red = mainprog_ptr->palette[i].r,
            .green = mainprog_ptr->palette[i].g,
            .blue = mainprog_ptr->palette[i].b,
        };
        trans[i] = mainprog_ptr->palette[i].a;
        if (mainprog_ptr->palette[i].a < 255) {
            num_trans = i+1;
        }
    }

    png_set_PLTE(png_ptr, info_ptr, palette, mainprog_ptr->num_palette);

    if (num_trans > 0) {
        png_set_tRNS(png_ptr, info_ptr, trans, num_trans, NULL);
    }

    rwpng_write_end(&info_ptr, &png_ptr, mainprog_ptr->row_pointers);

    /* file was fully produced; reject it afterwards if it exceeds the cap */
    if (SUCCESS == write_state.retval && write_state.maximum_file_size && write_state.bytes_written > write_state.maximum_file_size) {
        return TOO_LARGE_FILE;
    }

    return write_state.retval;
}

/*
 * Write a 32-bit RGBA PNG (no palette, no size cap, stdio output).
 * Returns SUCCESS or an init error from rwpng_write_image_init().
 */
pngquant_error rwpng_write_image24(FILE *outfile, const png24_image *mainprog_ptr)
{
    png_structp png_ptr;
    png_infop info_ptr;

    pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, 0);
    if (retval) return retval;

    png_init_io(png_ptr, outfile);

    rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma, mainprog_ptr->output_color);

    png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
                 8, PNG_COLOR_TYPE_RGB_ALPHA,
                 0, PNG_COMPRESSION_TYPE_DEFAULT,
                 PNG_FILTER_TYPE_BASE);

    png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);

    rwpng_write_end(&info_ptr, &png_ptr, row_pointers);

    free(row_pointers);

    return SUCCESS;
}

/* libpng fatal-error handler: prints the message, then longjmps back to the
 * setjmp() in whichever rwpng function is currently driving libpng. */
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg)
{
    rwpng_png_image *mainprog_ptr;

    /* This function, aside from the extra step of retrieving the "error
     * pointer" (below) and the fact that it exists within the application
     * rather than within libpng, is essentially identical to libpng's
     * default error handler. The second point is critical: since both
     * setjmp() and longjmp() are called from the same code, they are
     * guaranteed to have compatible notions of how big a jmp_buf is,
     * regardless of whether _BSD_SOURCE or anything else has (or has not)
     * been defined. */

    fprintf(stderr, " error: %s (libpng failed)\n", msg);
    fflush(stderr);

    mainprog_ptr = png_get_error_ptr(png_ptr);
    if (mainprog_ptr == NULL) abort();

    longjmp(mainprog_ptr->jmpbuf, 1);
}
test_omp.c
/*
 * Minimal OpenMP smoke test.
 *
 * Build: gcc -O2 -fopenmp -o test_omp test_omp.c
 *
 * Example output (thread count and interleaving vary by platform):
 *   Hello World from thread = 0
 *   Number of threads = 2
 *   Hello World from thread = 1
 */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
    int thread_total;
    int thread_id;

    /* Fork a team of threads; each thread gets its own private copies
     * of the two counters above. */
    #pragma omp parallel private(thread_total, thread_id)
    {
        /* Every thread announces itself. */
        thread_id = omp_get_thread_num();
        printf("Hello World from thread = %d\n", thread_id);

        /* The master thread (id 0) additionally reports the team size. */
        if (thread_id == 0) {
            thread_total = omp_get_num_threads();
            printf("Number of threads = %d\n", thread_total);
        }
    }
    /* Implicit barrier: the team joins here before main returns. */

    return 0;
}
GB_unop__identity_int8_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_int8_int8
// op(A') function: GB_unop_tran__identity_int8_int8

// C type:   int8_t
// A type:   int8_t
// cast:     int8_t cij = aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int8_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (int8 -> int8 identity, so the apply can reduce to a memcpy below)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    1

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator to anz entries of Ax, writing to Cx.
// Ab (if non-NULL) is the bitmap of A: entries with Ab[p]==0 are skipped.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop_apply__identity_int8_int8
(
    int8_t *Cx,                     // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity with no typecast: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            int8_t z = aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            int8_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose logic lives in the shared GB_unop_transpose.c
// template, specialized by the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_int8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_fp32_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__identity_fp32_uint64)
// op(A') function: GB (_unop_tran__identity_fp32_uint64)

// C type:   float
// A type:   uint64_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (uint64 -> float, so a plain memcpy is NOT possible here,
// unlike the same-type identity kernels)
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts anz entries of Ax (uint64_t) to float in Cx.  Ab (if non-NULL)
// is the bitmap of A: entries with Ab[p]==0 are skipped.  Returns
// GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_unop_apply__identity_fp32_uint64)
(
    float *Cx,                  // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose logic lives in the shared GB_unop_transpose.c
// template, specialized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fp32_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__eq_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__eq_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__eq_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_uint64)
// A*D function (colscale):         GB (_AxD__eq_uint64)
// D*A function (rowscale):         GB (_DxB__eq_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_uint64)
// C=scalar+B                       GB (_bind1st__eq_uint64)
// C=scalar+B'                      GB (_bind1st_tran__eq_uint64)
// C=A+scalar                       GB (_bind2nd__eq_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__eq_uint64)

// C type:   bool
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_UINT64 || GxB_NO_EQ_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// not generated for EQ: the op must be MIN, MAX, PLUS, MINUS, RMINUS,
// TIMES, DIV, or RDIV, so this variant is compiled out with #if 0.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// body compiled out (#if 0): EQ cannot be used as an accumulator here,
// so this function is a stub that returns GrB_SUCCESS without work.
GrB_Info GB (_Cdense_accumB__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// body compiled out (#if 0): same stub situation as _Cdense_accumB above.
GrB_Info GB (_Cdense_accumb__eq_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for EQ (commutative), so only the #else branch
    // below is compiled in this file.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x == Bx [p]) for all entries present in B's bitmap Bb.
GrB_Info GB (_bind1st__eq_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] == y) for all entries present in A's bitmap Ab.
GrB_Info GB (_bind2nd__eq_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__eq_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__eq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
max_SSE.c
/*
 * File:    max_SSE.c
 * Author:  Malcolm Davis
 * Course:  Computer Architecture II
 * Created on Apr 20, 2018
 * 16 bit values vector max
 *
 * Usage:
 *  ./max for default parameters and random vectors or;
 *  ./max v1.1 v1.2 v1.3 v1.4 v1.5 v1.6 v1.7 v1.8 v2.1 v2.2 v2.3 v2.4 v2.5 v2.6 v2.7 v2.8 [ v3.1 ...]
 */
#include <emmintrin.h>//v3
#include <smmintrin.h>//v4
#include <stdint.h>   /* int16_t (previously pulled in only transitively) */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Prints the command-line usage summary on stdout. */
void usage(){
    printf("Usage:\n ./max for default parameters and random vectors or;\n\
./max v1.1 v1.2 v1.3 v1.4 v1.5 v1.6 v1.7 v1.8 v2.1 v2.2 v2.3 v2.4 v2.5 v2.6 v2.7 v2.8 [ vn.1 ...]\n");
}

/*
 * Prints a __m128i vector on console as eight tab-separated int16 lanes.
 * @param v the vector to print (reads it lane-by-lane through an
 *          int16_t pointer, which has no alignment requirement)
 */
void printVector(__m128i* v){
    int16_t * pointer = (int16_t*)v;
    for (int i = 0; i < 8; ++i) {
        printf("%d\t", *pointer);
        pointer++;
    }
    printf("\n");
}

/*
 * Main method: retrieves command line options (or generates random
 * vectors when none are given) and folds all input vectors with
 * _mm_max_epi16, printing each vector and the running maximum.
 *
 * Returns 0 on success, -1 on bad argument count.
 *
 * Fixes vs. the original:
 *  - inputs are loaded with _mm_loadu_si128 instead of dereferencing a
 *    __m128i* cast of an int16_t array: the arrays carry no 16-byte
 *    alignment guarantee, so the old aligned dereference was UB;
 *  - the `#pragma omp parallel for` around the fill loops was removed:
 *    rand() is not required to be thread-safe (a data race), <omp.h>
 *    was never included, and 8 iterations gain nothing from threading.
 */
int main(int argc, char ** argv){
    /* Reject argument counts that are not a multiple of 8, and the
       degenerate case of exactly one vector (nothing to compare). */
    if (((argc-1)%8)!=0 || (int)((argc-1)/8) == 1){
        usage();
        return -1;
    }
    __m128i vec1, vec2, result;
    static int16_t v1[8], v2[8];
    if(argc == 1){
        srand (time(NULL));
        /* No arguments: generate two random vectors. */
        for (int i = 0; i < 8; ++i) {
            v1[i] = rand()%32767;
            v2[i] = rand()%32767;
        }
    }
    else{
        /* Arguments given: parse the first two vectors. */
        for (int i = 0; i < 8; ++i) {
            v1[i]=atoi(argv[i+1]);
            v2[i]=atoi(argv[i+9]);
        }
    }
    /* Unaligned loads: safe regardless of the arrays' alignment. */
    vec1 = _mm_loadu_si128((const __m128i*)v1);
    vec2 = _mm_loadu_si128((const __m128i*)v2);
    printf("Vector 1: ");
    printVector(&vec1);
    printf("Vector 2: ");
    printVector(&vec2);
    result = _mm_max_epi16(vec1, vec2);
    /* If there are more than 2 vectors, keep folding them in. */
    if((int)((argc-1)/8) >2){
        for (int i = 2; i < (argc-1)/8; ++i) {
            for (int j = 0; j < 8; ++j) {
                v2[j]=atoi(argv[i*8+j+1]);
            }
            vec2 = _mm_loadu_si128((const __m128i*)v2);
            printf("Vector %d: ", i+1);
            printVector(&vec2);
            result = _mm_max_epi16(result, vec2);
        }
    }
    printf("Result *********************** \n");
    printf("Result: ");
    printVector(&result);
    return 0;
}
dashlane_fmt_plug.c
/*
 * JtR format to crack Dashlane Password Manager files.
 *
 * This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
 * is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Special thanks goes to Robin Lambertz for making this work possible.
 */

#include "arch.h"

#if !AC_BUILT
#define HAVE_LIBZ 1 /* legacy build has -lz in LDFLAGS */
#endif

#if HAVE_LIBZ

#if FMT_EXTERNS_H
extern struct fmt_main fmt_dashlane;
#elif FMT_REGISTERS_H
john_register_one(&fmt_dashlane);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#endif

#include <openssl/evp.h>
#include <zlib.h>

#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "aes.h"
#include "sha2.h"
#include "jumbo.h"
#include "pbkdf2_hmac_sha1.h"
#include "dashlane_common.h"
#include "openssl_code.h"
#include "hmac_sha.h"
#include "memdbg.h"

#define FORMAT_NAME         "Dashlane Password Manager"
#define FORMAT_LABEL        "dashlane"
#define FORMAT_TAG          "$dashlane$"
#define TAG_LENGTH          (sizeof(FORMAT_TAG) - 1)
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME      "AES PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME      "AES PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT   ""
#define BENCHMARK_LENGTH    -1
#define BINARY_SIZE         0
#define BINARY_ALIGN        1
#define SALT_SIZE           sizeof(struct custom_salt)
#define SALT_ALIGN          sizeof(uint32_t)
#define PLAINTEXT_LENGTH    125
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT  SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT  SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT  1
#define MAX_KEYS_PER_CRYPT  1
#endif

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;
static struct custom_salt *cur_salt;

/* Allocate per-candidate buffers, scaled up for OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* BUG FIX: the original called omp_get_num_threads(), which always
	 * returns 1 outside a parallel region, so the OMP scaling below was
	 * a no-op.  omp_get_max_threads() reports the usable thread count. */
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
			self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked),
			self->params.max_keys_per_crypt);
	cracked_count = self->params.max_keys_per_crypt;
}

/* Release buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

static void dashlane_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

static char *get_key(int index)
{
	return saved_key[index];
}

/*
 * Derive a 32-byte key from each candidate with PBKDF2-HMAC-SHA1
 * (10204 iterations, the count Dashlane uses) and test it against the
 * current salt's vault blob.  Results go into cracked[].
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	memset(cracked, 0, sizeof(cracked[0]) * cracked_count);

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
		unsigned char pkey[MAX_KEYS_PER_CRYPT][32];
		int i;
#ifdef SIMD_COEF_32
		int len[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];

		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			len[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			pout[i] = pkey[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt,
				32, 10204, pout, 32, 0);
#else
		for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) {
			pbkdf2_sha1((unsigned char *)saved_key[index+i],
					strlen(saved_key[index+i]),
					cur_salt->salt, 32, 10204,
					pkey[i], 32, 0);
		}
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) {
			if (dashlane_verify(cur_salt, pkey[i]))
				cracked[index+i] = 1;
			else
				cracked[index+i] = 0;
		}
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;

	return 0;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* cracked[] is authoritative; nothing further to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_dashlane = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		dashlane_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		dashlane_valid,
		fmt_default_split,
		fmt_default_binary,
		dashlane_get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		dashlane_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */

#endif /* HAVE_LIBZ */
omp-low.c
/* Lowering pass for OpenMP directives. Converts OpenMP directives into explicit calls to the runtime library (libgomp) and data marshalling to implement data sharing and copying clauses. Contributed by Diego Novillo <dnovillo@redhat.com> Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "tree-gimple.h" #include "tree-inline.h" #include "langhooks.h" #include "diagnostic.h" #include "tree-flow.h" #include "timevar.h" #include "flags.h" #include "function.h" #include "expr.h" #include "toplev.h" #include "tree-pass.h" #include "ggc.h" #include "except.h" #include "splay-tree.h" #include "optabs.h" #include "cfgloop.h" /* Lowering of OpenMP parallel and workshare constructs proceeds in two phases. The first phase scans the function looking for OMP statements and then for variables that must be replaced to satisfy data sharing clauses. The second phase expands code for the constructs, as well as re-gimplifying things when variables have been replaced with complex expressions. Final code generation is done by pass_expand_omp. The flowgraph is scanned for parallel regions which are then moved to a new function, to be invoked by the thread library. */ /* Context structure. 
   Used to store information about each parallel directive in the
   code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":
     Some callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  tree stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;


/* A structure describing the main elements of a parallel loop.
   Filled in by extract_omp_for_data from an OMP_FOR statement:
   V is the iteration variable, N1/N2 the (normalized) bounds, STEP the
   increment, and COND_CODE is normalized to LT_EXPR or GT_EXPR.  */

struct omp_for_data
{
  tree v, n1, n2, step, chunk_size, for_stmt;
  enum tree_code cond_code;
  tree pre;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
};


static splay_tree all_contexts;
static int parallel_nesting_level;
struct omp_region *root_omp_region;

static void scan_omp (tree *, omp_context *);
static void lower_omp (tree *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.
   Returns the first matching clause, or NULL_TREE if none is present.  */

tree
find_omp_clause (tree clauses, enum tree_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return TREE_CODE (ctx->stmt) == OMP_PARALLEL;
}

/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  Normalizes the loop: LE/GE comparisons are turned
   into LT/GT by adjusting N2, and a MINUS_EXPR increment is folded
   into a negated STEP, so consumers only see the canonical forms.  */

static void
extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
{
  tree t, var;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;

  t = OMP_FOR_INIT (for_stmt);
  gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
  fd->v = GIMPLE_STMT_OPERAND (t, 0);
  gcc_assert (SSA_VAR_P (fd->v));
  gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
  var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
  fd->n1 = GIMPLE_STMT_OPERAND (t, 1);

  t = OMP_FOR_COND (for_stmt);
  fd->cond_code = TREE_CODE (t);
  gcc_assert (TREE_OPERAND (t, 0) == var);
  fd->n2 = TREE_OPERAND (t, 1);
  switch (fd->cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
      break;
    case LE_EXPR:
      /* v <= n2 becomes v < n2 + 1.  */
      fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
			    build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      /* v >= n2 becomes v > n2 - 1.  */
      fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
			    build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  t = OMP_FOR_INCR (fd->for_stmt);
  gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
  gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
  t = GIMPLE_STMT_OPERAND (t, 1);
  gcc_assert (TREE_OPERAND (t, 0) == var);
  switch (TREE_CODE (t))
    {
    case PLUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      break;
    case MINUS_EXPR:
      /* v = v - step becomes a PLUS with negated step.  */
      fd->step = TREE_OPERAND (t, 1);
      fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
      break;
    default:
      gcc_unreachable ();
    }

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;

  for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      default:
	break;
      }

  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }
}


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   OMP_FOR regions.  In some cases, these arguments are computed out
   of variables passed in from the parent to the child via 'struct
   .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

   	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
	
	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.
   So, in principle we cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   OMP_FOR header check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  tree par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  /* Sections regions take no loop-header arguments, so they are
     always safe to combine.  */
  if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    return true;

  gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd);

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.n1)
      || !is_gimple_min_invariant (fd.n2)
      || !is_gimple_min_invariant (fd.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (tree ws_stmt)
{
  tree t;

  if (TREE_CODE (ws_stmt) == OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd);

      /* Build the argument list back to front with tree_cons, so the
	 final order is n1, n2, step[, chunk_size].  */
      ws_args = NULL_TREE;
      if (fd.chunk_size)
	{
	  t = fold_convert (long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);
	}

      t = fold_convert (long_integer_type_node, fd.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 OMP_SECTIONS_SWITCH statement, except for the one to the exit
	 of the sections region.  */
      basic_block bb = single_succ (bb_for_stmt (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}

/* Discover whether REGION is a combined parallel+workshare region.
   Sets is_combined_parallel on both REGION and its inner region, and
   records the extra library-call arguments in REGION->ws_args.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != OMP_PARALLEL
      || (region->inner->type != OMP_FOR
	  && region->inner->type != OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      tree ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = OMP_FOR_CLAUSES (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.
*/

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  /* The mapping must exist; this dereferences the pointer-map slot
     unconditionally.  */
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  /* As with lookup_decl, the entry must exist.  */
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (const_tree decl, omp_context *shared_ctx)
{
  /* Aggregates are always passed by reference into .omp_data_s.  */
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  /* Find the innermost enclosing context that knows DECL.  */
	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (maybe_lookup_decl (decl, up))
	      break;

	  if (up && is_parallel_ctx (up))
	    {
	      tree c;

	      /* ... and check whether that parallel shares DECL.  */
	      for (c = OMP_PARALLEL_CLAUSES (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		return true;
	    }
	}
    }

  return false;
}

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  The copy is chained
   onto CTX->block_vars so it ends up in the child function's scope.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  /* Produce (*.omp_data_i).field, with an extra dereference when the
     field holds a pointer to the shared object.  */
  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      /* A variable-sized VAR is represented by a pointer held in its
	 DECL_VALUE_EXPR; recurse on that pointer and dereference.  */
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_parallel_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_field (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, omp_context *ctx)
{
  tree field, type;

  gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;

  insert_field_into_struct (ctx->record_type, field);

  splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		     (splay_tree_value) field);
}

/* Create a private copy of VAR local to CTX and record the mapping.  */

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      /* Rewrite the value expression in terms of the remapped decls.  */
      walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      /* Non-constant sizes must be remapped too; fall back to the
	 type's size if remapping failed.  */
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.
*/

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  /* CTX is the first member of omp_context, so this downcast is
     how the copy_body machinery smuggles the context through.  */
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  /* Walk outward until we reach a parallel context; an intervening
     workshare context may already have a mapping for VAR.  */
  while (!is_parallel_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   tree_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
{
  struct omp_region *region = xcalloc (1, sizeof (*region));

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  /* Free children first; N caches the sibling pointer before I dies.  */
  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (tree stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      /* Inherit the copy_body data from the enclosing context.  */
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_region = -1;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  /* Each context gets its own decl map even when the rest of CB is
     inherited from the outer context.  */
  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

/* Destroy a omp_context data structures.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  TREE_CHAIN (new_f) = new_fields;
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      /* The list was built in reverse; restore declaration order
	 before laying out the record.  */
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.
*/

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  /* First pass: create the .omp_data_s fields and the local copies
     for each clause.  */
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Variable-sized privates are handled in the second pass.  */
	  if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  by_ref = use_pointer_for_field (decl, ctx);
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    break;
	  else if (is_parallel_ctx (ctx)
		   && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
								       ctx)))
	    {
	      by_ref = use_pointer_for_field (decl, NULL);
	      install_var_field (decl, by_ref, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	  if (ctx->outer)
	    scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
	  /* FALLTHRU */

	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  /* The clause operand is an expression evaluated in the
	     enclosing context; scan it there.  */
	  if (ctx->outer)
	    scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Second pass: fix up the remapped decls now that all mappings from
     the first pass exist.  */
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Array reductions carry init/merge sequences that must be scanned
     after all the decl mappings are in place.  */
  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
	}
}

/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (void)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;

  prefix = alloca (len + sizeof ("_omp_fn"));
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, "_omp_fn");
  /* If the target allows it, replace the '_' separator with '.' or '$'
     so the helper cannot collide with a user-written identifier.  */
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.
*/

static void
create_omp_child_function (omp_context *ctx)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name ();
  /* The child function always has the signature
     void fn (void *.omp_data_i).  */
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);

  ctx->cb.dst_fn = decl;

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  /* The single parameter is the pointer to the sender's
     .omp_data_s record; it doubles as the receiver decl.  */
  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  ctx->receiver_decl = t;

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
  cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
  pop_cfun ();
}

/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
      && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
    {
      *stmt_p = build_empty_stmt ();
      return;
    }

  ctx = new_omp_context (*stmt_p, outer_ctx);
  /* parallel_nesting_level was already bumped for this directive in
     scan_omp_1, so > 1 means we are inside another parallel.  */
  if (parallel_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx);
  OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;

  scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
  scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);

  /* If no data needs to be shared, drop the record type entirely;
     otherwise finish laying it out for the child function.  */
  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree stmt;

  stmt = *stmt_p;
  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);

  scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  scan_omp (&OMP_FOR_INIT (stmt), ctx);
  scan_omp (&OMP_FOR_COND (stmt), ctx);
  scan_omp (&OMP_FOR_INCR (stmt), ctx);
  scan_omp (&OMP_FOR_BODY (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt;
  omp_context *ctx;

  stmt = *stmt_p;
  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
}

/* Scan an OpenMP single directive.
*/

static void
scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt = *stmt_p;
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  /* .omp_copy_s carries the copyprivate values between threads.  */
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SINGLE_BODY (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}

/* Check OpenMP nesting restrictions.  Walks outward through the
   enclosing contexts until a parallel region (which resets the
   nesting rules) or the outermost context is reached, warning about
   illegal closely-nested region combinations.

   NOTE(review): the work-sharing diagnostic text mentions "critical"
   but OMP_CRITICAL is not among the cases checked in the first
   switch, so a work-sharing region nested in a critical region is
   presumably not diagnosed here — confirm against the intended
   OpenMP nesting rules before relying on this warning.  */
static void
check_omp_nesting_restrictions (tree t, omp_context *ctx)
{
  switch (TREE_CODE (t))
    {
    case OMP_FOR:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_FOR:
	  case OMP_SECTIONS:
	  case OMP_SINGLE:
	  case OMP_ORDERED:
	  case OMP_MASTER:
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered or master region");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_FOR:
	  case OMP_SECTIONS:
	  case OMP_SINGLE:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing region");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_CRITICAL:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical region");
	    return;
	  case OMP_FOR:
	    if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (TREE_CODE (ctx->stmt) == OMP_CRITICAL
	    && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}

/* Callback for walk_stmts used to scan for OpenMP directives at TP.  */

static tree
scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  if (EXPR_HAS_LOCATION (t))
    input_location = EXPR_LOCATION (t);

  /* Check the OpenMP nesting restrictions.  */
  if (OMP_DIRECTIVE_P (t) && ctx != NULL)
    check_omp_nesting_restrictions (t, ctx);

  /* By default the directive handlers below walk their own bodies,
     so stop the generic walk here.  */
  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
      parallel_nesting_level++;
      scan_omp_parallel (tp, ctx);
      parallel_nesting_level--;
      break;

    case OMP_FOR:
      scan_omp_for (tp, ctx);
      break;

    case OMP_SECTIONS:
      scan_omp_sections (tp, ctx);
      break;

    case OMP_SINGLE:
      scan_omp_single (tp, ctx);
      break;

    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      ctx = new_omp_context (*tp, ctx);
      scan_omp (&OMP_BODY (*tp), ctx);
      break;

    case BIND_EXPR:
      {
	tree var;
	*walk_subtrees = 1;

	/* Locals declared in a BIND_EXPR map to themselves so the
	   remapping machinery leaves them alone.  */
	for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var))
	  insert_decl_map (&ctx->cb, var, var);
      }
      break;

    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	*walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}

/* Scan all the statements starting at STMT_P.  CTX contains context
   information about the OpenMP directives and clauses found during
   the scan.  */

static void
scan_omp (tree *stmt_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.callback = scan_omp_1;
  wi.info = ctx;
  wi.want_bind_expr = (ctx != NULL);
  wi.want_locations = true;

  /* scan_omp_1 clobbers input_location for diagnostics; restore it
     once the walk is done.  */
  saved_location = input_location;
  walk_stmts (&wi, stmt_p);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines.
*/

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (tree stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}

/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	     iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
      inside the body of the outer parallel region.  But since we are
      emitting this copy operation while expanding the inner parallel
      directive, we need to access the CTX structure of the outer
      parallel directive to get the correct mapping:

	  .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  /* In a nested context a non-global DECL must have been mapped
     somewhere in the enclosing chain.  */
  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  (The only difference from the function above is
   the absence of the nested-context assertion.)  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}

/* Construct the initialization value for reduction CLAUSE.  This is
   the identity element of the reduction operator: 0 for +/-/|/^/or,
   1 for */and, all-ones for &, and the extreme representable value
   for min/max (-inf/+inf when the float mode honors infinities).  */

tree
omp_reduction_init (tree clause, tree type)
{
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert (type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert (type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert (type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      /* Identity for max is -infinity.  */
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}

/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and
   initializers for REFERENCE_TYPE private variables.  Initialization
   statements go in ILIST, while calls to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
			 omp_context *ctx)
{
  tree_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  /* DITER marks where destructor calls are linked; they are prepended
     so destruction runs in reverse construction order.  */
  *dlist = alloc_stmt_list ();
  diter = tsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;

	  /* Filter out clause kinds this function does not handle.  */
	  switch (c_kind)
	    {
	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
		continue;
	      break;
	    case OMP_CLAUSE_SHARED:
	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
		{
		  /* Globals need no receiver-side copy.  */
		  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
		  continue;
		}
	      /* FALLTHRU -- handled like the cases below.  */
	    case OMP_CLAUSE_FIRSTPRIVATE:
	    case OMP_CLAUSE_COPYIN:
	    case OMP_CLAUSE_REDUCTION:
	      break;
	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		{
		  lastprivate_firstprivate = true;
		  if (pass != 0)
		    continue;
		}
	      break;
	    default:
	      continue;
	    }

	  new_var = var = OMP_CLAUSE_DECL (c);
	  /* COPYIN operates on the original decl; everything else uses
	     the context-local replacement.  */
	  if (c_kind != OMP_CLAUSE_COPYIN)
	    new_var = lookup_decl (var, ctx);

	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
	    {
	      if (pass != 0)
		continue;
	    }
	  else if (is_variable_sized (var))
	    {
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.
*/
	      /* VLA storage is second-pass work: the size expressions
		 depend on scalars privatized in pass 0.  */
	      if (pass == 0)
		continue;

	      /* The decl's DECL_VALUE_EXPR is *ptr; extract the pointer
		 temporary to assign the alloca result to.  */
	      ptr = DECL_VALUE_EXPR (new_var);
	      gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
	      ptr = TREE_OPERAND (ptr, 0);
	      gcc_assert (DECL_P (ptr));

	      x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
	      x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
	      x = fold_convert (TREE_TYPE (ptr), x);
	      x = build_gimple_modify_stmt (ptr, x);
	      gimplify_and_add (x, ilist);
	    }
	  else if (is_reference (var))
	    {
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
	      if (pass == 0)
		continue;

	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
	      if (TREE_CONSTANT (x))
		{
		  /* Constant-sized: use a named stack temporary.  */
		  const char *name = NULL;
		  if (DECL_NAME (var))
		    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

		  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
					  name);
		  gimple_add_tmp_var (x);
		  x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
		}
	      else
		{
		  /* Variable-sized referent: fall back to alloca.  */
		  x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
		  x = fold_convert (TREE_TYPE (new_var), x);
		}

	      x = build_gimple_modify_stmt (new_var, x);
	      gimplify_and_add (x, ilist);

	      /* From here on, initialize through the pointer.  */
	      new_var = build_fold_indirect_ref (new_var);
	    }
	  else if (c_kind == OMP_CLAUSE_REDUCTION
		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      if (pass == 0)
		continue;
	    }
	  else if (pass != 0)
	    continue;

	  switch (OMP_CLAUSE_CODE (c))
	    {
	    case OMP_CLAUSE_SHARED:
	      /* Shared global vars are just accessed directly.  */
	      if (is_global_var (new_var))
		break;

	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
		 needs to be delayed until after fixup_child_record_type so
		 that we get the correct type during the dereference.  */
	      by_ref = use_pointer_for_field (var, ctx);
	      x = build_receiver_ref (var, by_ref, ctx);
	      SET_DECL_VALUE_EXPR (new_var, x);
	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;

	      /* ???
If VAR is not passed by reference, and the variable
		 hasn't been initialized yet, then we'll get a warning for
		 the store into the omp_data_s structure.  Ideally, we'd be
		 able to notice this and not store anything at all, but
		 we're generating code too early.  Suppress the warning.  */
	      if (!by_ref)
		TREE_NO_WARNING (var) = 1;
	      break;

	    case OMP_CLAUSE_LASTPRIVATE:
	      /* Combined firstprivate+lastprivate is initialized by the
		 FIRSTPRIVATE case; nothing more to do here.  */
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		break;
	      /* FALLTHRU */

	    case OMP_CLAUSE_PRIVATE:
	      x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
	      if (x)
		gimplify_and_add (x, ilist);
	      /* FALLTHRU */

	    do_dtor:
	      /* Destructors are linked at DITER so they run in reverse
		 order of construction.  */
	      x = lang_hooks.decls.omp_clause_dtor (c, new_var);
	      if (x)
		{
		  dtor = x;
		  gimplify_stmt (&dtor);
		  tsi_link_before (&diter, dtor, TSI_SAME_STMT);
		}
	      break;

	    case OMP_CLAUSE_FIRSTPRIVATE:
	      /* Copy-construct the private copy from the outer value.  */
	      x = build_outer_var_ref (var, ctx);
	      x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
	      gimplify_and_add (x, ilist);
	      goto do_dtor;
	      break;  /* Unreachable after the goto above.  */

	    case OMP_CLAUSE_COPYIN:
	      /* Queue the assignment; it is emitted later guarded so
		 only non-master threads execute it.  */
	      by_ref = use_pointer_for_field (var, NULL);
	      x = build_receiver_ref (var, by_ref, ctx);
	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
	      append_to_statement_list (x, &copyin_seq);
	      copyin_by_ref |= by_ref;
	      break;

	    case OMP_CLAUSE_REDUCTION:
	      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
		{
		  /* User-defined init sequence recorded during scanning;
		     emit it once, then clear it.  */
		  gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
		  OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
		}
	      else
		{
		  /* Initialize with the operator's identity element.  */
		  x = omp_reduction_init (c, TREE_TYPE (new_var));
		  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
		  x = build_gimple_modify_stmt (new_var, x);
		  gimplify_and_add (x, ilist);
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.
*/
  if (copyin_seq)
    {
      /* Guard: only threads whose omp_get_thread_num () != 0 run the
	 copyin assignments.  */
      x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}

/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  The generated statements are appended to STMT_LIST.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
			   omp_context *ctx)
{
  tree sub_list, x, c;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.
*/
      if (is_parallel_ctx (ctx))
	return;

      /* Only look one level up, and only at a parallel context.  */
      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
    }

  sub_list = alloc_stmt_list ();

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);

      /* Assign the private copy back to the outer variable.  */
      x = build_outer_var_ref (var, ctx);
      if (is_reference (var))
	new_var = build_fold_indirect_ref (new_var);
      x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
      append_to_statement_list (x, &sub_list);
    }

  /* Wrap the whole copy-back sequence in PREDICATE, if any.  */
  if (predicate)
    x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
  else
    x = sub_list;

  gimplify_and_add (x, stmt_list);
}

/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
{
  tree sub_list = NULL, x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    /* Never use OMP_ATOMIC for array reductions.  COUNT == -1
	       forces the locked path below.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
	new_var = build_fold_indirect_ref (new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).
*/
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
	  /* Single scalar reduction: emit one OMP_ATOMIC update.  */
	  tree addr = build_fold_addr_expr (ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_list);
	  return;
	}

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  /* User-defined merge sequence: bind the placeholder to the
	     outer reference and emit the recorded merge code.  */
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (is_reference (var))
	    ref = build_fold_addr_expr (ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
	  OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  /* Plain operator merge: outer = outer OP private.  */
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  x = build_gimple_modify_stmt (ref, x);
	  append_to_statement_list (x, &sub_list);
	}
    }

  /* Multiple (or array) reductions: bracket all merges with the
     libgomp atomic lock.  */
  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
  gimplify_and_add (x, stmt_list);

  gimplify_and_add (sub_list, stmt_list);

  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
  gimplify_and_add (x, stmt_list);
}

/* Generate code to implement the COPYPRIVATE clauses.  Sender-side
   statements go in SLIST, receiver-side statements in RLIST.  */

static void
lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
			   omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, x;
      bool by_ref;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      /* Sender side: store the value (or its address) into the
	 communication record.  */
      ref = build_sender_ref (var, ctx);
      x = lookup_decl_in_outer_ctx (var, ctx);
      x = by_ref ? build_fold_addr_expr (x) : x;
      x = build_gimple_modify_stmt (ref, x);
      gimplify_and_add (x, slist);

      /* Receiver side: copy out of the communication record.  */
      ref = build_receiver_ref (var, by_ref, ctx);
      if (is_reference (var))
	{
	  ref = build_fold_indirect_ref (ref);
	  var = build_fold_indirect_ref (var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
      gimplify_and_add (x, rlist);
    }
}

/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  Copy-in statements go
   in ILIST, copy-out statements in OLIST.  */

static void
lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  break;
	default:
	  continue;
	}

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      /* Globals (except for COPYIN) and variable-sized decls are not
	 passed through the communication record.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;
      if (is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, NULL);

      /* Decide the data-flow direction for this clause kind.  */
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    do_out = true;
	  break;

	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  do_out = !(by_ref || is_reference (val));
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr (var) : var;
	  x = build_gimple_modify_stmt (ref, x);
	  gimplify_and_add (x, ilist);
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  x = build_gimple_modify_stmt (var, ref);
	  gimplify_and_add (x, olist);
	}
    }
}

/* Generate code to implement SHARED from the sender (aka parent) side.
   This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
   got automatically shared.
*/

static void
lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x;

  if (ctx->record_type == NULL)
    return;

  /* Walk the communication record's fields instead of the clause list;
     each field corresponds to one (possibly implicitly) shared decl.  */
  for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      /* If CTX is a nested parallel directive.  Find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
	{
	  /* By reference: send the address only; no copy-out needed.  */
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  x = build_gimple_modify_stmt (x, var);
	  gimplify_and_add (x, ilist);
	}
      else
	{
	  /* By value: copy in before the region ...  */
	  x = build_sender_ref (ovar, ctx);
	  x = build_gimple_modify_stmt (x, var);
	  gimplify_and_add (x, ilist);

	  /* ... and copy back out after it.  */
	  x = build_sender_ref (ovar, ctx);
	  x = build_gimple_modify_stmt (var, x);
	  gimplify_and_add (x, olist);
	}
    }
}

/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      tree entry_stmt, tree ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  block_stmt_iterator si;
  int start_ix;

  clauses = OMP_PARALLEL_CLAUSES (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be emitting.
*/
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      /* Combined parallel+workshare uses a fused libgomp entry point.  */
      switch (region->inner->type)
	{
	case OMP_FOR:
	  /* The loop-start builtins are laid out in schedule-kind
	     order, so the kind can be used as an offset.  */
	  start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
		     + region->inner->sched_kind;
	  break;
	case OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    val = OMP_CLAUSE_NUM_THREADS_EXPR (c);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert (unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      block_stmt_iterator si;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	/* num_threads defaults to 0: (cond == 0) yields 1 (serial) when
	   the condition is false and 0 (runtime choice) when true.  */
	val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
			   build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  /* Materialize (cond ? val : 1u) as explicit control flow:
	     split BB and build a diamond with a PHI at the join.  */
	  basic_block cond_bb, then_bb, else_bb;
	  edge e, e_then, e_else;
	  tree t, tmp_then, tmp_else, tmp_join, tmp_var;

	  tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
	  if (gimple_in_ssa_p (cfun))
	    {
	      tmp_then = make_ssa_name (tmp_var, NULL_TREE);
	      tmp_else = make_ssa_name (tmp_var, NULL_TREE);
	      tmp_join = make_ssa_name (tmp_var, NULL_TREE);
	    }
	  else
	    {
	      tmp_then = tmp_var;
	      tmp_else = tmp_var;
	      tmp_join = tmp_var;
	    }

	  e = split_block (bb, NULL);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

	  t = build3 (COND_EXPR, void_type_node,
		      cond, NULL_TREE, NULL_TREE);

	  si = bsi_start (cond_bb);
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

	  si = bsi_start (then_bb);
	  t = build_gimple_modify_stmt (tmp_then, val);
	  if (gimple_in_ssa_p (cfun))
	    SSA_NAME_DEF_STMT (tmp_then) = t;
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

	  /* else arm: force one thread (serial execution).  */
	  si = bsi_start (else_bb);
	  t = build_gimple_modify_stmt (tmp_else,
					build_int_cst (unsigned_type_node,
						       1));
	  if (gimple_in_ssa_p (cfun))
	    SSA_NAME_DEF_STMT (tmp_else) = t;
	  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

	  /* Wire up the diamond.  */
	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

	  if (gimple_in_ssa_p (cfun))
	    {
	      tree phi = create_phi_node (tmp_join, bb);
	      SSA_NAME_DEF_STMT (tmp_join) = phi;
	      add_phi_arg (phi, tmp_then, e_then);
	      add_phi_arg (phi, tmp_else, e_else);
	    }

	  val = tmp_join;
	}

      si = bsi_start (bb);
      val = force_gimple_operand_bsi (&si, val, true, NULL_TREE,
				      false, BSI_CONTINUE_LINKING);
    }

  /* Build GOMP_parallel_*_start (child_fn, &data, num_threads, ...).  */
  si = bsi_last (bb);
  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));

  if (ws_args)
    {
      /* Append the workshare-specific arguments for the combined
	 parallel+workshare entry points.  */
      tree args = tree_cons (NULL, t2,
			     tree_cons (NULL, t1,
					tree_cons (NULL, val, ws_args)));
      t = build_function_call_expr (built_in_decls[start_ix], args);
    }
  else
    t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);

  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
			    false, BSI_CONTINUE_LINKING);

  /* The spawning thread also runs the outlined body itself.  */
  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
			    false, BSI_CONTINUE_LINKING);

  /* Join the team.  */
  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
			    false, BSI_CONTINUE_LINKING);
}

/* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
   handler.  This prevents programs from violating the structured
   block semantics with throws.
*/

static void
maybe_catch_exception (tree *stmt_p)
{
  tree f, t;

  if (!flag_exceptions)
    return;

  /* The failure action: either the frontend's cleanup hook or a
     plain trap.  */
  if (lang_protect_cleanup_actions)
    t = lang_protect_cleanup_actions ();
  else
    t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
  f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
  EH_FILTER_MUST_NOT_THROW (f) = 1;
  gimplify_and_add (t, &EH_FILTER_FAILURE (f));

  /* Wrap the original statement list in TRY_CATCH with the filter
     as the handler, and replace *STMT_P with the wrapper.  */
  t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
  append_to_statement_list (f, &TREE_OPERAND (t, 1));

  *stmt_p = NULL;
  append_to_statement_list (t, stmt_p);
}

/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  Returns the
   first DECL of the resulting chain, or NULL_TREE for an empty LIST.  */

static tree
list2chain (tree list)
{
  tree t;

  for (t = list; t; t = TREE_CHAIN (t))
    {
      tree var = TREE_VALUE (t);
      if (TREE_CHAIN (t))
	TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
      else
	TREE_CHAIN (var) = NULL_TREE;
    }

  return list ? TREE_VALUE (list) : NULL_TREE;
}

/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the OMP_PARALLEL that
   left a barrier at the end of the OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  block_stmt_iterator si;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  tree t;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's OMP_RETURN.  The
     workshare's OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this OMP_RETURN is a label.
*/
  si = bsi_last (exit_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
  bsi_prev (&si);
  /* Bail out if anything other than a label precedes the OMP_RETURN.  */
  if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR)
    return;

  /* Mark any workshare OMP_RETURN feeding the exit block as nowait;
     the parallel's implicit barrier makes its barrier redundant.  */
  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      si = bsi_last (e->src);
      if (bsi_end_p (si))
	continue;
      t = bsi_stmt (si);
      if (TREE_CODE (t) == OMP_RETURN)
	OMP_RETURN_NOWAIT (t) = 1;
    }
}

/* Recursively apply remove_exit_barrier to every OMP_PARALLEL region
   in the region tree rooted at REGION and its siblings' subtrees.  */

static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}

/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  */

static void
optimize_omp_library_calls (void)
{
  basic_block bb;
  block_stmt_iterator bsi;
  tree thr_num_id
    = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
  tree num_thr_id
    = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);

  FOR_EACH_BB (bb)
    for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
      {
	tree stmt = bsi_stmt (bsi);
	tree call = get_call_expr_in (stmt);
	tree decl;

	/* Only rewrite calls to external, public, undefined functions;
	   a local definition could be anything.  */
	if (call
	    && (decl = get_callee_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
	    else
	      continue;

	    /* Guard against a redirected assembler name or a call with
	       unexpected arguments.  */
	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| call_expr_nargs (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    /* The declared return type must match the builtin's.  */
	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
		   != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    CALL_EXPR_FN (call) = build_fold_addr_expr (built_in);
	  }
      }
}

/* Expand the OpenMP parallel directive starting at REGION.  */

static void
expand_omp_parallel (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t, ws_args;
  block_stmt_iterator si;
  tree entry_stmt;
  edge e;

  entry_stmt = last_stmt (region->entry);
  child_fn = OMP_PARALLEL_FN (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has been already instrumented, make sure
     the child function isn't instrumented again.  */
  child_cfun->after_tree_profile = cfun->after_tree_profile;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL_TREE;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      block_stmt_iterator si;

      entry_succ_e = single_succ_edge (entry_bb);

      si = bsi_last (entry_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
      bsi_remove (&si, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.
In which case, we need to keep the assignment.  */
      if (OMP_PARALLEL_DATA_ARG (entry_stmt))
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  block_stmt_iterator si;
	  tree parcopy_stmt = NULL_TREE, arg, narg;

	  /* Scan forward for the .OMP_DATA_I = &.OMP_DATA_O copy;
	     it must exist, hence the assert inside the loop.  */
	  for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
	    {
	      tree stmt, arg;

	      gcc_assert (!bsi_end_p (si));
	      stmt = bsi_stmt (si);
	      if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
		continue;

	      arg = GIMPLE_STMT_OPERAND (stmt, 1);
	      STRIP_NOPS (arg);
	      if (TREE_CODE (arg) == ADDR_EXPR
		  && TREE_OPERAND (arg, 0)
		     == OMP_PARALLEL_DATA_ARG (entry_stmt))
		{
		  parcopy_stmt = stmt;
		  break;
		}
	    }

	  gcc_assert (parcopy_stmt != NULL_TREE);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      /* Drop the copy entirely when it would be the identity
		 assignment; otherwise redirect its RHS to the child's
		 parameter.  */
	      if (GIMPLE_STMT_OPERAND (parcopy_stmt, 0) == arg)
		bsi_remove (&si, true);
	      else
		GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = arg;
	    }
	  else
	    {
	      /* If we are in ssa form, we must load the value from the default
		 definition of the argument.  That should not be defined now,
		 since the argument is not used uninitialized.  */
	      gcc_assert (gimple_default_def (cfun, arg) == NULL);
	      narg = make_ssa_name (arg, build_empty_stmt ());
	      set_default_def (arg, narg);
	      GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = narg;
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
      DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
	 child function.  */
      si = bsi_last (entry_bb);
      t = bsi_stmt (si);
      gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL);
      bsi_remove (&si, true);
      e = split_block (entry_bb, t);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert OMP_RETURN into a RETURN_EXPR.
*/
      if (exit_bb)
	{
	  si = bsi_last (exit_bb);
	  gcc_assert (!bsi_end_p (si)
		      && TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
	  t = build1 (RETURN_EXPR, void_type_node, NULL);
	  bsi_insert_after (&si, t, BSI_SAME_STMT);
	  bsi_remove (&si, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      /* Initialize SSA machinery in the child before moving blocks
	 into it.  */
      if (gimple_in_ssa_p (cfun))
	{
	  push_cfun (child_cfun);
	  init_tree_ssa ();
	  init_ssa_operands ();
	  cfun->gimple_df->in_ssa_p = true;
	  pop_cfun ();
	}
      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
	= cfun->curr_properties;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      if (optimize)
	optimize_omp_library_calls ();
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  tree save_current = current_function_decl;
	  bool changed = false;

	  current_function_decl = child_fn;
	  FOR_EACH_BB (bb)
	    changed |= tree_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	  current_function_decl = save_current;
	}
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  update_ssa (TODO_update_ssa_only_virtuals);
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.
   Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.  */

static void
expand_omp_for_generic (struct omp_region *region,
			struct omp_for_data *fd,
			enum built_in_function start_fn,
			enum built_in_function next_fn)
{
  tree type, istart0, iend0, iend, phi;
  tree t, vmain, vback;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  block_stmt_iterator si;
  bool in_combined_parallel = is_combined_parallel (region);
  /* A loop body that never reaches OMP_CONTINUE has no CONT block.  */
  bool broken_loop = region->cont == NULL;
  edge e, ne;

  gcc_assert (!broken_loop || !in_combined_parallel);

  type = TREE_TYPE (fd->v);

  /* Out-parameters for the libgomp iteration-range calls; they must
     be addressable since their addresses are passed.  */
  istart0 = create_tmp_var (long_integer_type_node, ".istart0");
  iend0 = create_tmp_var (long_integer_type_node, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (istart0);
      add_referenced_var (iend0);
    }

  /* Identify and create the basic blocks named L0..L3 in the header
     comment's pseudocode.  */
  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest
		 == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  else
    l2_bb = NULL;
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
	 GOMP_loop_foo_next.
*/
      t = build_call_expr (built_in_decls[next_fn], 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
	 GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (long_integer_type_node, fd->step);
      t1 = fold_convert (long_integer_type_node, fd->n2);
      t0 = fold_convert (long_integer_type_node, fd->n1);
      if (fd->chunk_size)
	{
	  /* The chunked variant takes an extra CHUNK argument.  */
	  t = fold_convert (long_integer_type_node, fd->chunk_size);
	  t = build_call_expr (built_in_decls[start_fn], 6,
			       t0, t1, t2, t, t3, t4);
	}
      else
	t = build_call_expr (built_in_decls[start_fn], 5,
			     t0, t1, t2, t3, t4);
    }
  /* The call's boolean result selects L0 (more work) vs L3 (done).  */
  t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				true, BSI_SAME_STMT);
  t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
  bsi_insert_after (&si, t, BSI_SAME_STMT);

  /* Remove the OMP_FOR statement.  */
  bsi_remove (&si, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  si = bsi_start (l0_bb);
  t = fold_convert (type, istart0);
  t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
				false, BSI_CONTINUE_LINKING);
  t = build_gimple_modify_stmt (fd->v, t);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (fd->v) = t;

  t = fold_convert (type, iend0);
  iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				   false, BSI_CONTINUE_LINKING);

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
	 loop goes in the CONT_BB.
*/
      si = bsi_last (cont_bb);
      t = bsi_stmt (si);
      gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
      /* OMP_CONTINUE carries the two loop-variable versions: operand 0
	 is the definition after the increment, operand 1 the use.  */
      vmain = TREE_OPERAND (t, 1);
      vback = TREE_OPERAND (t, 0);

      /* V += STEP, inserted before the OMP_CONTINUE marker.  */
      t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
      t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
				    true, BSI_SAME_STMT);
      t = build_gimple_modify_stmt (vback, t);
      bsi_insert_before (&si, t, BSI_SAME_STMT);
      if (gimple_in_ssa_p (cfun))
	SSA_NAME_DEF_STMT (vback) = t;

      /* if (V cond iend) goto L1; else goto L2;  */
      t = build2 (fd->cond_code, boolean_type_node, vback, iend);
      t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
      bsi_insert_before (&si, t, BSI_SAME_STMT);

      /* Remove OMP_CONTINUE.  */
      bsi_remove (&si, true);

      /* Emit code to get the next parallel iteration in L2_BB.  */
      si = bsi_start (l2_bb);

      t = build_call_expr (built_in_decls[next_fn], 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
      t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				    false, BSI_CONTINUE_LINKING);
      t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
      bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  si = bsi_last (exit_bb);
  if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
  else
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
  t = build_call_expr (t, 0);
  bsi_insert_after (&si, t, BSI_SAME_STMT);
  /* The cleanup call replaces the OMP_RETURN.  */
  bsi_remove (&si, true);

  /* Connect the new blocks.
*/
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      /* Redirect the CONT->L3 exit through L2; PHI arguments on the
	 new edge inherit the values from the old edge.  */
      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
	SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
		 PHI_ARG_DEF_FROM_EDGE (phi, e));
      remove_edge (e);

      find_edge (cont_bb, l1_bb)->flags = EDGE_TRUE_VALUE;
      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      /* Dominators of the touched blocks must be recomputed.  */
      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
			       recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
			       recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
			       recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
			       recompute_dominator (CDI_DOMINATORS, l1_bb));
    }
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.
Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	q += (q * nthreads != n);
	s0 = q * threadid;
	e0 = min(s0 + q, n);
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
*/

static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, nthreads, threadid;
  tree type, vmain, vback;
  basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
  basic_block fin_bb;
  block_stmt_iterator si;

  type = TREE_TYPE (fd->v);

  /* Recover the basic blocks of the region: ENTRY_BB holds the OMP_FOR,
     CONT_BB holds the OMP_CONTINUE, EXIT_BB holds the OMP_RETURN.  A new
     SEQ_START_BB is split off the fall-through edge to hold the per-thread
     setup code; FIN_BB is where both the "no work" branch and the loop
     exit converge.  */
  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (type, t);
  nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				       true, BSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (type, t);
  threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				       true, BSI_SAME_STMT);

  /* Gimplify the loop bounds and step into the iteration type before
     using them in the trip-count computation below.  */
  fd->n1 = force_gimple_operand_bsi (&si,
				     fold_convert (type, fd->n1), true,
				     NULL_TREE, true, BSI_SAME_STMT);
  fd->n2 = force_gimple_operand_bsi (&si,
				     fold_convert (type, fd->n2), true,
				     NULL_TREE, true, BSI_SAME_STMT);
  fd->step = force_gimple_operand_bsi (&si,
				       fold_convert (type, fd->step), true,
				       NULL_TREE, true, BSI_SAME_STMT);

  /* n = (adj + N2 - N1) / STEP, where adj = STEP -/+ 1 so that the
     truncating division rounds the trip count up (see pseudocode
     above).  */
  t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, type, fd->step, t);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
  t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
  t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
  t = fold_convert (type, t);
  n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  /* q = ceil (n / nthreads): the NE_EXPR yields 0/1 which is added to
     the truncated quotient.  */
  t = fold_build2 (TRUNC_DIV_EXPR, type, n, nthreads);
  q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, type, q, nthreads);
  t = fold_build2 (NE_EXPR, type, t, n);
  t = fold_build2 (PLUS_EXPR, type, q, t);
  q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  /* [s0, e0) is this thread's slice of the logical iteration space.  */
  t = build2 (MULT_EXPR, type, q, threadid);
  s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, type, s0, q);
  t = fold_build2 (MIN_EXPR, type, t, n);
  e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  /* Empty slice: branch straight to FIN_BB (edge flags are fixed up at
     the bottom of this function).  */
  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
  bsi_insert_before (&si, t, BSI_SAME_STMT);

  /* Remove the OMP_FOR statement.  */
  bsi_remove (&si, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = bsi_start (seq_start_bb);

  t = fold_convert (type, s0);
  t = fold_build2 (MULT_EXPR, type, t, fd->step);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
  t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
				false, BSI_CONTINUE_LINKING);
  t = build_gimple_modify_stmt (fd->v, t);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (fd->v) = t;

  t = fold_convert (type, e0);
  t = fold_build2 (MULT_EXPR, type, t, fd->step);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
  e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				false, BSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the OMP_CONTINUE.  */
  si = bsi_last (cont_bb);
  t = bsi_stmt (si);
  gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
  /* Operand 1 is the incoming IV value, operand 0 the value carried to
     the next iteration.  */
  vmain = TREE_OPERAND (t, 1);
  vback = TREE_OPERAND (t, 0);

  t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
  t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
				true, BSI_SAME_STMT);
  t = build_gimple_modify_stmt (vback, t);
  bsi_insert_before (&si, t, BSI_SAME_STMT);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (vback) = t;

  t = build2 (fd->cond_code, boolean_type_node, vback, e);
  t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
  bsi_insert_before (&si, t, BSI_SAME_STMT);

  /* Remove the OMP_CONTINUE statement.  */
  bsi_remove (&si, true);

  /* Replace the OMP_RETURN with a barrier, or nothing.  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
    force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Connect all the blocks.  The fall-through edge out of ENTRY_BB now
     leads to the per-thread setup, and the former abnormal edges become
     the two arms of the COND_EXPRs inserted above.  */
  find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min(s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/

static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree n, s0, e0, e, t, phi, nphi, args;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, cont, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  block_stmt_iterator si;
  edge se, re, ene;

  type = TREE_TYPE (fd->v);

  /* Split ENTRY_BB after the OMP_FOR: the new ITER_PART_BB becomes the
     head of the outer chunk loop (label L0 in the pseudocode above).  */
  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
	      == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (type, t);
  nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				       true, BSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (type, t);
  threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				       true, BSI_SAME_STMT);

  fd->n1 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n1),
				     true, NULL_TREE,
				     true, BSI_SAME_STMT);
  fd->n2 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n2),
				     true, NULL_TREE,
				     true, BSI_SAME_STMT);
  fd->step = force_gimple_operand_bsi (&si, fold_convert (type, fd->step),
				       true, NULL_TREE,
				       true, BSI_SAME_STMT);
  fd->chunk_size
	  = force_gimple_operand_bsi (&si, fold_convert (type,
							 fd->chunk_size),
				      true, NULL_TREE,
				      true, BSI_SAME_STMT);

  /* Round-up trip count, same scheme as in the nochunk variant.  */
  t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, type, fd->step, t);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
  t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
  t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
  t = fold_convert (type, t);
  n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  /* The trip counter gets three SSA names: its initialization (in
     ENTRY_BB), the value used inside the loop (a PHI in ITER_PART_BB,
     created at the bottom of this function), and the incremented value
     computed in TRIP_UPDATE_BB.  Outside SSA they all alias the same
     variable.  */
  trip_var = create_tmp_var (type, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL_TREE);
      trip_main = make_ssa_name (trip_var, NULL_TREE);
      trip_back = make_ssa_name (trip_var, NULL_TREE);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  t = build_gimple_modify_stmt (trip_init, build_int_cst (type, 0));
  bsi_insert_before (&si, t, BSI_SAME_STMT);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (trip_init) = t;

  /* The "extra" definition of V from the pseudocode, so V is defined
     even when the loop body never runs.  */
  t = fold_build2 (MULT_EXPR, type, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, type, t, fd->step);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
  v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				      true, BSI_SAME_STMT);

  /* Remove the OMP_FOR.  */
  bsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = bsi_last (iter_part_bb);

  t = fold_build2 (MULT_EXPR, type, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, type, t, threadid);
  t = fold_build2 (MULT_EXPR, type, t, fd->chunk_size);
  s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				 false, BSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, type, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, type, t, n);
  e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				 false, BSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = bsi_start (seq_start_bb);

  t = fold_convert (type, s0);
  t = fold_build2 (MULT_EXPR, type, t, fd->step);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
  t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
				false, BSI_CONTINUE_LINKING);
  t = build_gimple_modify_stmt (fd->v, t);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (fd->v) = t;

  t = fold_convert (type, e0);
  t = fold_build2 (MULT_EXPR, type, t, fd->step);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
  e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
				false, BSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the OMP_CONTINUE.  */
  si = bsi_last (cont_bb);
  cont = bsi_stmt (si);
  gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
  v_main = TREE_OPERAND (cont, 1);
  v_back = TREE_OPERAND (cont, 0);

  t = build2 (PLUS_EXPR, type, v_main, fd->step);
  t = build_gimple_modify_stmt (v_back, t);
  bsi_insert_before (&si, t, BSI_SAME_STMT);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (v_back) = t;

  t = build2 (fd->cond_code, boolean_type_node, v_back, e);
  t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
  bsi_insert_before (&si, t, BSI_SAME_STMT);

  /* Remove OMP_CONTINUE.  */
  bsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = bsi_start (trip_update_bb);

  t = build_int_cst (type, 1);
  t = build2 (PLUS_EXPR, type, trip_main, t);
  t = build_gimple_modify_stmt (trip_back, t);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (trip_back) = t;

  /* Replace the OMP_RETURN with a barrier, or nothing.  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
    force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);

  if (gimple_in_ssa_p (cfun))
    {
      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
	 remove arguments of the phi nodes in fin_bb.  We need to create
	 appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      ene = single_succ_edge (entry_bb);

      args = PENDING_STMT (re);
      PENDING_STMT (re) = NULL_TREE;
      for (phi = phi_nodes (fin_bb);
	   phi && args;
	   phi = PHI_CHAIN (phi), args = TREE_CHAIN (args))
	{
	  t = PHI_RESULT (phi);
	  gcc_assert (t == TREE_PURPOSE (args));
	  nphi = create_phi_node (t, iter_part_bb);
	  SSA_NAME_DEF_STMT (t) = nphi;

	  t = PHI_ARG_DEF_FROM_EDGE (phi, se);
	  /* A special case -- fd->v is not yet computed in iter_part_bb, we
	     need to use v_extra instead.  */
	  if (t == fd->v)
	    t = v_extra;
	  add_phi_arg (nphi, t, ene);
	  add_phi_arg (nphi, TREE_VALUE (args), re);
	}
      gcc_assert (!phi && !args);
      while ((phi = phi_nodes (fin_bb)) != NULL_TREE)
	remove_phi_node (phi, NULL_TREE, false);

      /* Make phi node for trip.  */
      phi = create_phi_node (trip_main, iter_part_bb);
      SSA_NAME_DEF_STMT (trip_main) = phi;
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
    }

  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
			   recompute_dominator (CDI_DOMINATORS, iter_part_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
			   recompute_dominator (CDI_DOMINATORS, seq_start_bb));
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
}


/* Expand the OpenMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;

  extract_omp_for_data (last_stmt (region->entry), &fd);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  /* Unordered static schedules are expanded inline; everything else
     (dynamic, guided, runtime, or ordered) goes through the generic
     GOMP_loop_*_start/next runtime interface.  */
  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      /* This index arithmetic assumes the BUILT_IN_GOMP_LOOP_* enum
	 values are laid out contiguously by schedule kind, with the
	 ordered variants offset by 4.  */
      int fn_index = fd.sched_kind + fd.have_ordered * 4;
      int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
      int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
      expand_omp_for_generic (region, &fd, start_ix, next_ix);
    }

  update_ssa (TODO_update_ssa_only_virtuals);
}


/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	  ...
	  case n:
	  ...
default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

   If this is a combined parallel sections, replace the call to
   GOMP_sections_start with call to GOMP_sections_next.  */

static void
expand_omp_sections (struct omp_region *region)
{
  tree label_vec, l1, l2, t, u, sections_stmt, vin, vmain, vnext, cont;
  unsigned i, casei, len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  block_stmt_iterator si;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  /* A "#pragma omp sections" with no exit block (and hence no cont
     block) can occur, e.g. when every section ends in a non-local
     exit; handle both shapes.  */
  bool exit_reachable = region->cont != NULL;

  gcc_assert (exit_reachable == (region->exit != NULL));

  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (exit_reachable)
    {
      /* Find the label for the "end of work" case (L2 in the pseudocode
	 above).  Normally it is the exit block itself.  */
      if (single_pred (l2_bb) == l0_bb)
	l2 = tree_block_label (l2_bb);
      else
	{
	  /* This can happen if there are reductions.  Scan the
	     successors of L0_BB for the first one that is not an
	     OMP_SECTION and use its label instead.  */
	  len = EDGE_COUNT (l0_bb->succs);
	  gcc_assert (len > 0);
	  e = EDGE_SUCC (l0_bb, len - 1);
	  si = bsi_last (e->dest);
	  l2 = NULL_TREE;
	  if (bsi_end_p (si) || TREE_CODE (bsi_stmt (si)) != OMP_SECTION)
	    l2 = tree_block_label (e->dest);
	  else
	    FOR_EACH_EDGE (e, ei, l0_bb->succs)
	      {
		si = bsi_last (e->dest);
		if (bsi_end_p (si)
		    || TREE_CODE (bsi_stmt (si)) != OMP_SECTION)
		  {
		    l2 = tree_block_label (e->dest);
		    break;
		  }
	      }
	}
      default_bb = create_empty_bb (l1_bb->prev_bb);
      l1 = tree_block_label (l1_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l1 = NULL_TREE;
      l2 = tree_block_label (default_bb);
    }

  /* We will build a switch() with enough cases for all the
     OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);
  label_vec = make_tree_vec (len + 1);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     OMP_SECTIONS statement.  */
  si = bsi_last (entry_bb);
  sections_stmt = bsi_stmt (si);
  gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
  vin = OMP_SECTIONS_CONTROL (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
			 exit_reachable ? len - 1 : len);
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
      t = build_call_expr (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      t = build_call_expr (u, 0);
    }
  t = build_gimple_modify_stmt (vin, t);
  bsi_insert_after (&si, t, BSI_SAME_STMT);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (vin) = t;
  bsi_remove (&si, true);

  /* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB.  */
  si = bsi_last (l0_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
      vmain = TREE_OPERAND (cont, 1);
      vnext = TREE_OPERAND (cont, 0);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  t = build3 (SWITCH_EXPR, void_type_node, vmain, NULL, label_vec);
  bsi_insert_after (&si, t, BSI_SAME_STMT);
  bsi_remove (&si, true);

  i = 0;
  if (exit_reachable)
    {
      /* Case 0: the runtime says there is no more work.  */
      t = build3 (CASE_LABEL_EXPR, void_type_node,
		  build_int_cst (unsigned_type_node, 0), NULL, l2);
      TREE_VEC_ELT (label_vec, 0) = t;
      i++;
    }

  /* Convert each OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = tree_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
      TREE_VEC_ELT (label_vec, i) = u;

      si = bsi_last (s_entry_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION);
      gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
      bsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      si = bsi_last (s_exit_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
      bsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = tree_block_label (default_bb);
  u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
  TREE_VEC_ELT (label_vec, len) = u;
  make_edge (l0_bb, default_bb, 0);

  si = bsi_start (default_bb);
  t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      /* Code to get the next section goes in L1_BB.  */
      si = bsi_last (l1_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);

      t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
      t = build_gimple_modify_stmt (vnext, t);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
      if (gimple_in_ssa_p (cfun))
	SSA_NAME_DEF_STMT (vnext) = t;
      bsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;

      /* Cleanup function replaces OMP_RETURN in EXIT_BB.  */
      si = bsi_last (l2_bb);
      if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
      else
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
      t = build_call_expr (t, 0);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
      bsi_remove (&si, true);
    }

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}


/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  block_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = bsi_last (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;

  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE);
  bsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  /* The OMP_RETURN becomes either a barrier or nothing (nowait with no
     copyprivate).  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
    force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, BSI_SAME_STMT);
  bsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}


/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry and
   exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  block_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  /* Drop the directive statement itself; the actual locking calls were
     emitted during lowering.  */
  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE
	      || TREE_CODE (bsi_stmt (si)) == OMP_MASTER
	      || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED
	      || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL);
  bsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  /* EXIT_BB may be absent, e.g. for a master region with no exit.  */
  if (exit_bb)
    {
      si = bsi_last (exit_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
      bsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.
INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb, tree addr,
			    tree loaded_val, tree stored_val, int index)
{
  enum built_in_function base;
  tree decl, itype, call;
  enum insn_code *optab;
  tree rhs;
  basic_block store_bb = single_succ (load_bb);
  block_stmt_iterator bsi;
  tree stmt;

  /* We expect to find the following sequences:

   load_bb:
       OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */

  bsi = bsi_after_labels (store_bb);
  stmt = bsi_stmt (bsi);
  if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
    return false;
  bsi_next (&bsi);
  if (TREE_CODE (bsi_stmt (bsi)) != OMP_ATOMIC_STORE)
    return false;

  if (!operand_equal_p (GIMPLE_STMT_OPERAND (stmt, 0), stored_val, 0))
    return false;

  rhs = GIMPLE_STMT_OPERAND (stmt, 1);

  /* Check for one of the supported fetch-op operations.  */
  switch (TREE_CODE (rhs))
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      base = BUILT_IN_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_FETCH_AND_SUB_N;
      /* NOTE(review): this checks sync_add_optab, not sync_sub_optab,
	 before emitting a fetch-and-sub builtin -- presumably relying on
	 sub being expandable via add on targets that have it; verify
	 against the expander.  */
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form, i.e. the loaded
     value appears as exactly one operand of RHS (either side for
     commutative codes, left side otherwise).  */
  if (operand_equal_p (TREE_OPERAND (rhs, 0), loaded_val, 0))
    rhs = TREE_OPERAND (rhs, 1);
  else if (commutative_tree_code (TREE_CODE (rhs))
	   && operand_equal_p (TREE_OPERAND (rhs, 1), loaded_val, 0))
    rhs = TREE_OPERAND (rhs, 0);
  else
    return false;

  decl = built_in_decls[base + index + 1];
  itype = TREE_TYPE (TREE_TYPE (decl));

  if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return false;

  /* Replace the OMP_ATOMIC_LOAD with the __sync call (result unused:
     the call is folded to void).  */
  bsi = bsi_last (load_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
  call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
  call = fold_convert (void_type_node, call);
  force_gimple_operand_bsi (&bsi, call, true, NULL_TREE, true, BSI_SAME_STMT);
  bsi_remove (&bsi, true);

  /* Drop both the OMP_ATOMIC_STORE and the now-dead modify statement
     feeding it.  */
  bsi = bsi_last (store_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
  bsi_remove (&bsi, true);
  bsi = bsi_last (store_bb);
  bsi_remove (&bsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
        newval = rhs;	// with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
			    tree addr, tree loaded_val, tree stored_val,
			    int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  block_stmt_iterator bsi;
  basic_block loop_header = single_succ (load_bb);
  tree phi, x;
  edge e;

  cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  /* Target must provide a compare-and-swap for this width.  */
  if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return false;

  /* Load the initial value, replacing the OMP_ATOMIC_LOAD.  */
  bsi = bsi_last (load_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      iaddr = create_tmp_var (build_pointer_type (itype), NULL);
      x = build_gimple_modify_stmt (iaddr,
				    fold_convert (TREE_TYPE (iaddr), addr));
      force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
				true, BSI_SAME_STMT);
      /* Keep aliasing from treating the punned access as disjoint from
	 the original object.  */
      DECL_NO_TBAA_P (iaddr) = 1;
      DECL_POINTER_ALIAS_SET (iaddr) = 0;
      loadedi = create_tmp_var (itype, NULL);
      if (gimple_in_ssa_p (cfun))
	{
	  add_referenced_var (iaddr);
	  add_referenced_var (loadedi);
	  loadedi = make_ssa_name (loadedi, NULL);
	}
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  initial = force_gimple_operand_bsi (&bsi, build_fold_indirect_ref (iaddr),
				      true, NULL_TREE, true, BSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  In SSA form this is the
     loop-carried PHI in LOOP_HEADER (its back-edge argument is filled
     in once the retry edge exists, below).  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (phi_nodes (loop_header) == NULL_TREE);
      phi = create_phi_node (loadedi, loop_header);
      SSA_NAME_DEF_STMT (loadedi) = phi;
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    bsi_insert_before (&bsi,
		       build_gimple_modify_stmt (loadedi, initial),
		       BSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      /* View-convert the integral copy back to the user-visible type at
	 the top of the loop.  */
      block_stmt_iterator bsi2;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      bsi2 = bsi_start (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
	  x = force_gimple_operand_bsi (&bsi2, x, true, NULL_TREE,
					true, BSI_SAME_STMT);
	  x = build_gimple_modify_stmt (loaded_val, x);
	  bsi_insert_before (&bsi2, x, BSI_SAME_STMT);
	  SSA_NAME_DEF_STMT (loaded_val) = x;
	}
      else
	{
	  x = build_gimple_modify_stmt (loaded_val, x);
	  force_gimple_operand_bsi (&bsi2, x, true, NULL_TREE,
				    true, BSI_SAME_STMT);
	}
    }
  bsi_remove (&bsi, true);

  bsi = bsi_last (store_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi = force_gimple_operand_bsi (&bsi,
					build1 (VIEW_CONVERT_EXPR, itype,
						stored_val), true, NULL_TREE,
					true, BSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_bsi (&bsi,
					  fold_convert (itype, new_storedi),
					  true, NULL_TREE,
					  true, BSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (itype, NULL);
      /* NOTE(review): dead branch -- we are in the !gimple_in_ssa_p arm,
	 so this add_referenced_var call can never execute; harmless.  */
      if (gimple_in_ssa_p (cfun))
	add_referenced_var (old_vali);
      x = build_gimple_modify_stmt (old_vali, loadedi);
      force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
				true, BSI_SAME_STMT);

      x = build_gimple_modify_stmt (loadedi, new_storedi);
      force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
				true, BSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  x = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali);
  x = build3 (COND_EXPR, void_type_node, x, NULL_TREE, NULL_TREE);
  bsi_insert_before (&bsi, x, BSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = phi_nodes (loop_header);
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove OMP_ATOMIC_STORE.  */
  bsi_remove (&bsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

		      GOMP_atomic_start ();
		      *addr = rhs;
		      GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of OMP_ATOMIC_LOAD we're expanding.
   STORED_VAL is the operand of the matching OMP_ATOMIC_STORE.

   We replace
   OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;
*/

static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  block_stmt_iterator bsi;
  tree t;

  bsi = bsi_last (load_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);

  /* GOMP_atomic_start (); loaded_val = *addr;  */
  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_function_call_expr (t, 0);
  force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);

  t = build_gimple_modify_stmt (loaded_val, build_fold_indirect_ref (addr));
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (loaded_val) = t;
  bsi_insert_before (&bsi, t, BSI_SAME_STMT);
  bsi_remove (&bsi, true);

  bsi = bsi_last (store_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);

  /* *addr = stored_val; GOMP_atomic_end ();  ADDR is unshared because
     it is now used in two statements.  */
  t = build_gimple_modify_stmt (build_fold_indirect_ref (unshare_expr (addr)),
				stored_val);
  bsi_insert_before (&bsi, t, BSI_SAME_STMT);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_function_call_expr (t, 0);
  force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
  bsi_remove (&bsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}

/* Expand an OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().
*/

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  tree load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = TREE_OPERAND (load, 0);
  tree addr = TREE_OPERAND (load, 1);
  tree stored_val = TREE_OPERAND (store, 0);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  INDEX becomes
     log2 of the byte size (0..4, i.e. 1 to 16 bytes) and selects the
     matching __sync builtin.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* When possible, use specialized atomic update functions.
	     This requires the load and store blocks to be adjacent,
	     i.e. the region body is a single supported operation.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb))
	    {
	      if (expand_omp_atomic_fetch_op (load_bb, addr,
					      loaded_val, stored_val, index))
		return;
	    }

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}

/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == OMP_PARALLEL)
	determine_parallel_type (region);

      /* Expand children before the region itself (depth-first).  */
      if (region->inner)
	expand_omp (region->inner);

      switch (region->type)
	{
	case OMP_PARALLEL:
	  expand_omp_parallel (region);
	  break;

	case OMP_FOR:
	  expand_omp_for (region);
	  break;

	case OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent OMP_SECTIONS region.  */
	  break;

	case OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case OMP_MASTER:
	case OMP_ORDERED:
	case OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      region = region->next;
    }
}

/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
		     bool single_tree)
{
  block_stmt_iterator si;
  tree stmt;
  basic_block son;

  si = bsi_last (bb);
  if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si)))
    {
      struct omp_region *region;
      enum tree_code code;

      stmt = bsi_stmt (si);
      code = TREE_CODE (stmt);
      if (code == OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == OMP_ATOMIC_STORE)
	{
	  /* OMP_ATOMIC_STORE is analoguous to OMP_RETURN, but matches with
	     OMP_ATOMIC_LOAD.  */
	  gcc_assert (parent);
	  gcc_assert (parent->type == OMP_ATOMIC_LOAD);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == OMP_CONTINUE)
	{
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else if (code == OMP_SECTIONS_SWITCH)
	{
	  /* OMP_SECTIONS_SWITCH is part of OMP_SECTIONS, and we do nothing
	     for it.  */
	  ;
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }

  /* Once SINGLE_TREE mode has closed the root region (PARENT is back
     to NULL), stop scanning.  */
  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}

/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}

/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}

/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}

/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}

/* OMP expansion in SSA form.  For testing purposes only.
*/

/* Gate: run the SSA variant of OMP expansion only when explicitly
   requested, OpenMP is enabled, and no errors have been seen.  */

static bool
gate_expand_omp_ssa (void)
{
  return flag_openmp_ssa && flag_openmp != 0 && errorcount == 0;
}

struct tree_opt_pass pass_expand_omp_ssa =
{
  "ompexpssa",				/* name */
  gate_expand_omp_ssa,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  0,					/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func,			/* todo_flags_finish */
  0					/* letter */
};

/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return ((!flag_openmp_ssa || !optimize)
	  && flag_openmp != 0 && errorcount == 0);
}

struct tree_opt_pass pass_expand_omp =
{
  "ompexp",				/* name */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  0,					/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func,			/* todo_flags_finish */
  0					/* letter */
};

/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in *STMT_P.
*/

static void
lower_omp_sections (tree *stmt_p, omp_context *ctx)
{
  tree new_stmt, stmt, body, bind, block, ilist, olist, new_body, control;
  tree t, dlist;
  tree_stmt_iterator tsi;
  unsigned i, len;

  stmt = *stmt_p;

  push_gimplify_context ();

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx);

  /* Count the sections in the body.  */
  tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
  for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
    continue;

  /* Lower each OMP_SECTION in place, terminating each with an
     OMP_RETURN marker.  */
  tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
  body = alloc_stmt_list ();
  for (i = 0; i < len; i++, tsi_next (&tsi))
    {
      omp_context *sctx;
      tree sec_start, sec_end;

      sec_start = tsi_stmt (tsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      append_to_statement_list (sec_start, &body);

      lower_omp (&OMP_SECTION_BODY (sec_start), sctx);
      append_to_statement_list (OMP_SECTION_BODY (sec_start), &body);
      OMP_SECTION_BODY (sec_start) = NULL;

      /* The lastprivate copy-out goes at the end of the last section.  */
      if (i == len - 1)
	{
	  tree l = alloc_stmt_list ();
	  lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL,
				     &l, ctx);
	  append_to_statement_list (l, &body);
	  OMP_SECTION_LAST (sec_start) = 1;
	}

      sec_end = make_node (OMP_RETURN);
      append_to_statement_list (sec_end, &body);
    }

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, body, block);

  olist = NULL_TREE;
  lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx);

  pop_gimplify_context (NULL_TREE);
  record_vars_into (ctx->block_vars, ctx->cb.dst_fn);

  /* Sequence: input clauses, the directive, the dispatch switch, the
     lowered sections, the OMP_CONTINUE, reductions, destructors, and
     finally the region exit marker.  */
  new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (new_stmt) = 1;

  new_body = alloc_stmt_list ();
  append_to_statement_list (ilist, &new_body);
  append_to_statement_list (stmt, &new_body);
  append_to_statement_list (make_node (OMP_SECTIONS_SWITCH), &new_body);
  append_to_statement_list (bind, &new_body);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = build2 (OMP_CONTINUE, void_type_node, control, control);
  OMP_SECTIONS_CONTROL (stmt) = control;
  append_to_statement_list (t, &new_body);

  append_to_statement_list (olist, &new_body);
  append_to_statement_list (dlist, &new_body);

  maybe_catch_exception (&new_body);

  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt),
					     OMP_CLAUSE_NOWAIT);
  append_to_statement_list (t, &new_body);

  BIND_EXPR_BODY (new_stmt) = new_body;
  OMP_SECTIONS_BODY (stmt) = NULL;

  *stmt_p = new_stmt;
}

/* A subroutine of lower_omp_single.  Expand the simple form of
   an OMP_SINGLE, without a copyprivate clause:

     	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_simple (tree single_stmt, tree *pre_p)
{
  tree t;

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START], 0);
  t = build3 (COND_EXPR, void_type_node, t,
	      OMP_SINGLE_BODY (single_stmt), NULL);
  gimplify_and_add (t, pre_p);
}

/* A subroutine of lower_omp_single.  Expand the simple form of
   an OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	{
	  if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	    {
	      BODY;
	      copyout.a = a;
	      copyout.b = b;
	      copyout.c = c;
	      GOMP_single_copy_end (&copyout);
	    }
	  else
	    {
	      a = copyout_p->a;
	      b = copyout_p->b;
	      c = copyout_p->c;
	    }
	  GOMP_barrier ();
	}

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.
*/

static void
lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, copyin_seq;

  /* The sender record holds the copies written by the executing
     thread; the receiver pointer is what the other threads get back
     from GOMP_single_copy_start.  */
  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label ();
  l1 = create_artificial_label ();
  l2 = create_artificial_label ();

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
  t = fold_convert (ptr_type, t);
  t = build_gimple_modify_stmt (ctx->receiver_decl, t);
  gimplify_and_add (t, pre_p);

  /* A NULL return means this thread executes the single region (L0);
     otherwise it copies the results in (L1).  */
  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l0);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);

  copyin_seq = NULL;
  lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
			      &copyin_seq, ctx);

  t = build_fold_addr_expr (ctx->sender_decl);
  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l1);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (copyin_seq, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l2);
  gimplify_and_add (t, pre_p);
}

/* Expand code for an OpenMP single directive.
*/

static void
lower_omp_single (tree *stmt_p, omp_context *ctx)
{
  tree t, bind, block, single_stmt = *stmt_p, dlist;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
			   &BIND_EXPR_BODY (bind), &dlist, ctx);
  lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);

  append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind));

  /* A non-NULL record type means a copyprivate clause was seen and a
     copy-out structure was built during scanning.  */
  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
  else
    lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));

  OMP_SINGLE_BODY (single_stmt) = NULL;

  append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));

  maybe_catch_exception (&BIND_EXPR_BODY (bind));

  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt),
					     OMP_CLAUSE_NOWAIT);
  append_to_statement_list (t, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}

/* Expand code for an OpenMP master directive.
*/

static void
lower_omp_master (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, lab = NULL, x;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  /* Guard the body: only the thread with id 0 executes it; all
     others jump past it to LAB.  */
  x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_MASTER_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_MASTER_BODY (stmt));
  append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_MASTER_BODY (stmt) = NULL;

  x = build1 (LABEL_EXPR, void_type_node, lab);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  /* No implicit barrier at the end of a master region.  */
  x = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (x) = 1;
  append_to_statement_list (x, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}

/* Expand code for an OpenMP ordered directive.
*/

static void
lower_omp_ordered (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, x;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  /* Bracket the body with GOMP_ordered_start/GOMP_ordered_end.  */
  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_ORDERED_BODY (stmt));
  append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_ORDERED_BODY (stmt) = NULL;

  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  /* No implicit barrier at the end of an ordered region.  */
  x = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (x) = 1;
  append_to_statement_list (x, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}

/* Gimplify an OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.
*/

/* Map from critical-section name to the shared mutex variable that
   backs it; GC-rooted so it survives across functions.  */
static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p;
  tree t, lock, unlock, name;

  name = OMP_CRITICAL_NAME (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
	critical_name_mutexes
	  = splay_tree_new_ggc (splay_tree_compare_pointers);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  /* First use of this name: create the mutex variable.  It is
	     TREE_PUBLIC and DECL_COMMON so that every translation unit
	     using the same critical name shares one mutex.  */
	  decl = create_tmp_var_raw (ptr_type_node, NULL);

	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;
	  varpool_finalize_decl (decl);

	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
			     (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;

      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
    }
  else
    {
      /* Anonymous critical sections all share the runtime's default
	 mutex.  */
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_call_expr (lock, 0);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_call_expr (unlock, 0);
    }

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  gimplify_and_add (lock, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
  append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_CRITICAL_BODY (stmt) = NULL;

  gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));

  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = 1;
  append_to_statement_list (t, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}

/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
			   tree *dlist, struct omp_context *ctx)
{
  tree clauses, cond, stmts, vinit, t;
  enum tree_code cond_code;

  cond_code = fd->cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);

  clauses = OMP_FOR_CLAUSES (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (stmts != NULL)
    {
      append_to_statement_list (stmts, dlist);

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->n2, 0)
	  && ! integer_zerop (fd->n2))
	vinit = build_int_cst (TREE_TYPE (fd->v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      t = build_gimple_modify_stmt (fd->v, vinit);
      gimplify_and_add (t, body_p);
    }
}

/* Lower code for an OpenMP loop directive.
*/

static void
lower_omp_for (tree *stmt_p, omp_context *ctx)
{
  tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
  struct omp_for_data fd;

  stmt = *stmt_p;

  push_gimplify_context ();

  lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  lower_omp (&OMP_FOR_BODY (stmt), ctx);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
    record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);

  new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (new_stmt) = 1;
  body_p = &BIND_EXPR_BODY (new_stmt);

  /* The pre-body and input clauses go before the lowered OMP_FOR.  */
  ilist = NULL;
  dlist = NULL;
  append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
  lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

     	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  rhs_p = &GIMPLE_STMT_OPERAND (OMP_FOR_INIT (stmt), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd);

  lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);

  append_to_statement_list (stmt, body_p);

  append_to_statement_list (OMP_FOR_BODY (stmt), body_p);

  t = build2 (OMP_CONTINUE, void_type_node, fd.v, fd.v);
  append_to_statement_list (t, body_p);

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
  append_to_statement_list (dlist, body_p);

  maybe_catch_exception (body_p);

  /* Region exit marker goes at the end of the loop body.  */
  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = fd.have_nowait;
  append_to_statement_list (t, body_p);

  pop_gimplify_context (NULL_TREE);
  record_vars_into (ctx->block_vars, ctx->cb.dst_fn);

  OMP_FOR_BODY (stmt) = NULL_TREE;
  OMP_FOR_PRE_BODY (stmt) = NULL_TREE;
  *stmt_p = new_stmt;
}

/* Callback for walk_stmts.  Check if *TP only contains OMP_FOR
   or OMP_PARALLEL.  */

static tree
check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  int *info = wi->info;

  *walk_subtrees = 0;
  switch (TREE_CODE (*tp))
    {
    case OMP_FOR:
    case OMP_SECTIONS:
      /* *INFO: 0 = nothing seen yet, 1 = exactly one workshare seen,
	 -1 = anything else (not a combined parallel).  */
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}

/* Lower the OpenMP parallel directive in *STMT_P.  CTX holds context
   information for the directive.  */

static void
lower_omp_parallel (tree *stmt_p, omp_context *ctx)
{
  tree clauses, par_bind, par_body, new_body, bind;
  tree olist, ilist, par_olist, par_ilist;
  tree stmt, child_fn, t;

  stmt = *stmt_p;

  clauses = OMP_PARALLEL_CLAUSES (stmt);
  par_bind = OMP_PARALLEL_BODY (stmt);
  par_body = BIND_EXPR_BODY (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (!OMP_PARALLEL_COMBINED (stmt))
    {
      /* Detect a parallel whose body is exactly one workshare
	 construct and mark it combined.  */
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.callback = check_combined_parallel;
      wi.info = &ws_num;
      wi.val_only = true;
      walk_stmts (&wi, &par_bind);
      if (ws_num == 1)
	OMP_PARALLEL_COMBINED (stmt) = 1;
    }

  push_gimplify_context ();

  par_olist = NULL_TREE;
  par_ilist = NULL_TREE;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (&par_body, ctx);
  lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
      OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
    }

  olist = NULL_TREE;
  ilist = NULL_TREE;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside OMP_PARALLEL_BODY.  */
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));

  new_body = alloc_stmt_list ();

  if (ctx->record_type)
    {
      t = build_fold_addr_expr (ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
      t = build_gimple_modify_stmt (ctx->receiver_decl, t);
      append_to_statement_list (t, &new_body);
    }

  append_to_statement_list (par_ilist, &new_body);
  append_to_statement_list (par_body, &new_body);
  append_to_statement_list (par_olist, &new_body);
  maybe_catch_exception (&new_body);
  t = make_node (OMP_RETURN);
  append_to_statement_list (t, &new_body);
  OMP_PARALLEL_BODY (stmt) = new_body;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
  append_to_statement_list (olist, &BIND_EXPR_BODY (bind));

  *stmt_p = bind;

  pop_gimplify_context (NULL_TREE);
}

/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  */

static tree
lower_omp_2 (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

/* Worker for lower_omp: recursively lower all OpenMP directives found
   in *TP, dispatching to the per-directive lowering routines above.
   TSI, when non-NULL, is an iterator positioned at *TP inside its
   enclosing STATEMENT_LIST, used to splice in gimplification
   pre-statements.  */

static void
lower_omp_1 (tree *tp, omp_context *ctx, tree_stmt_iterator *tsi)
{
  tree t = *tp;

  if (!t)
    return;

  if (EXPR_HAS_LOCATION (t))
    input_location = EXPR_LOCATION (t);

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (errorcount && OMP_DIRECTIVE_P (t))
    {
      *tp = build_empty_stmt ();
      return;
    }

  switch (TREE_CODE (t))
    {
    case STATEMENT_LIST:
      {
	tree_stmt_iterator i;
	for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i))
	  lower_omp_1 (tsi_stmt_ptr (i), ctx, &i);
      }
      break;

    case COND_EXPR:
      lower_omp_1 (&COND_EXPR_THEN (t), ctx, NULL);
      lower_omp_1 (&COND_EXPR_ELSE (t), ctx, NULL);
      if (ctx && walk_tree (&COND_EXPR_COND (t), lower_omp_2, ctx, NULL))
	{
	  tree pre = NULL;
	  gimplify_expr (&COND_EXPR_COND (t), &pre, NULL,
			 is_gimple_condexpr, fb_rvalue);
	  if (pre)
	    {
	      if (tsi)
		tsi_link_before (tsi, pre, TSI_SAME_STMT);
	      else
		{
		  append_to_statement_list (t, &pre);
		  *tp = pre;
		}
	    }
	}
      break;
    case CATCH_EXPR:
      lower_omp_1 (&CATCH_BODY (t), ctx, NULL);
      break;
    case EH_FILTER_EXPR:
      lower_omp_1 (&EH_FILTER_FAILURE (t), ctx, NULL);
      break;
    case TRY_CATCH_EXPR:
    case TRY_FINALLY_EXPR:
      lower_omp_1 (&TREE_OPERAND (t, 0), ctx, NULL);
      lower_omp_1 (&TREE_OPERAND (t, 1), ctx, NULL);
      break;
    case BIND_EXPR:
      lower_omp_1 (&BIND_EXPR_BODY (t), ctx, NULL);
      break;
    case RETURN_EXPR:
      lower_omp_1 (&TREE_OPERAND (t, 0), ctx, NULL);
      break;

    case OMP_PARALLEL:
      ctx = maybe_lookup_ctx (t);
      lower_omp_parallel (tp, ctx);
      break;
    case OMP_FOR:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_for (tp, ctx);
      break;
    case OMP_SECTIONS:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_sections (tp, ctx);
      break;
    case OMP_SINGLE:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_single (tp, ctx);
      break;
    case OMP_MASTER:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_master (tp, ctx);
      break;
    case OMP_ORDERED:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_ordered (tp, ctx);
      break;
    case OMP_CRITICAL:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_critical (tp, ctx);
      break;

    default:
      if (ctx && walk_tree (tp, lower_omp_2, ctx, NULL))
	{
	  /* The gimplifier doesn't gimplify CALL_EXPR_STATIC_CHAIN.
	     Handle that here.  */
	  tree call = get_call_expr_in (t);
	  if (call
	      && CALL_EXPR_STATIC_CHAIN (call)
	      && walk_tree (&CALL_EXPR_STATIC_CHAIN (call), lower_omp_2,
			    ctx, NULL))
	    {
	      tree pre = NULL;
	      gimplify_expr (&CALL_EXPR_STATIC_CHAIN (call), &pre, NULL,
			     is_gimple_val, fb_rvalue);
	      if (pre)
		{
		  if (tsi)
		    tsi_link_before (tsi, pre, TSI_SAME_STMT);
		  else
		    {
		      append_to_statement_list (t, &pre);
		      lower_omp_1 (&pre, ctx, NULL);
		      *tp = pre;
		      return;
		    }
		}
	    }

	  if (tsi == NULL)
	    gimplify_stmt (tp);
	  else
	    {
	      tree pre = NULL;
	      gimplify_expr (tp, &pre, NULL, is_gimple_stmt, fb_none);
	      if (pre)
		tsi_link_before (tsi, pre, TSI_SAME_STMT);
	    }
	}
      break;
    }
}

/* Lower all OpenMP directives in *STMT_P.  CTX is the innermost
   enclosing OMP context (NULL at the outermost level).  */

static void
lower_omp (tree *stmt_p, omp_context *ctx)
{
  lower_omp_1 (stmt_p, ctx, NULL);
}

/* Main entry point.
*/

static unsigned int
execute_lower_omp (void)
{
  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
  gcc_assert (parallel_nesting_level == 0);

  /* Only lower if scanning found at least one OMP context.  */
  if (all_contexts->root)
    lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  return 0;
}

static bool
gate_lower_omp (void)
{
  return flag_openmp != 0;
}

struct tree_opt_pass pass_lower_omp =
{
  "omplower",				/* name */
  gate_lower_omp,			/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  0,					/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func,			/* todo_flags_finish */
  0					/* letter */
};

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

/* Map from LABEL_DECL to the context list recorded by pass 1
   (diagnose_sb_1); consulted by pass 2 (diagnose_sb_2).  */
static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
{
  bool exit_p = true;

  if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
    return false;

  /* Try to avoid confusing the user by producing and error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");

  /* Replace the offending statement so RTL expansion is not confused.  */
  *stmt_p = build_empty_stmt ();
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where in the tree each label is found.  */

static tree
diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  tree inner_context;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      /* The minimal context here is just a tree of statements.  */
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case LABEL_EXPR:
      splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.
*/

static tree
diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  splay_tree_node n;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      wi->info = t;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
      wi->info = t;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case GOTO_EXPR:
      {
	tree lab = GOTO_DESTINATION (t);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
      }
      break;

    case SWITCH_EXPR:
      {
	tree vec = SWITCH_LABELS (t);
	int i, len = TREE_VEC_LENGTH (vec);
	for (i = 0; i < len; ++i)
	  {
	    tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
	    /* NOTE(review): unlike the GOTO_EXPR case above, N is
	       dereferenced without a NULL check here — presumably every
	       case label was recorded by pass 1; confirm that switch
	       labels always reach diagnose_sb_1 as LABEL_EXPRs.  */
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (diagnose_sb_0 (tp, context, (tree) n->value))
	      break;
	  }
      }
      break;

    case RETURN_EXPR:
      diagnose_sb_0 (tp, context, NULL_TREE);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Run the two structured-block diagnostic passes over FNDECL's body:
   pass 1 records each label's enclosing OMP context, pass 2 checks
   every branch against its target label's context.  */

void
diagnose_omp_structured_block_errors (tree fndecl)
{
  tree save_current = current_function_decl;
  struct walk_stmt_info wi;

  current_function_decl = fndecl;

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_1;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_2;
  wi.want_locations = true;
  wi.want_return_expr = true;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  splay_tree_delete (all_labels);
  all_labels = NULL;

  current_function_decl = save_current;
}

#include "gt-omp-low.h"
base.h
#include "callback.h"
#include <omp.h>

/* OMPT callback test: run an empty 4-iteration loop over a 4-thread
   parallel region and verify (through the FileCheck "CHECK:" patterns
   below) the OMPT event sequence reported by the master thread and by
   each of the three worker threads.  SCHEDULE is a macro presumably
   supplied by the test driver (e.g. -DSCHEDULE=static) -- confirm against
   the lit RUN lines in the full file.  */
int main()
{
  unsigned int i;

#pragma omp parallel for num_threads(4) schedule(SCHEDULE)
  for (i = 0; i < 4; i++)
  {
  }

  /* NOTE: the CHECK lines below are FileCheck patterns consumed by the
     test harness; they are load-bearing and must not be edited.  */

  // Master thread: parallel_begin, then implicit task + loop begin/end.
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id={{[0-9]+}}, parent_task_frame=0x{{[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker={{.*}}

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // Worker thread 1: same implicit-task/loop event sequence.
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // Worker thread 2.
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // Worker thread 3.
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  return 0;
}
kernel_cpu_2.balance.c
#include "hclib.h"

/* Per-OpenMP-thread task counters for load-balance accounting, indexed by
   omp_get_thread_num(); sized for up to 32 threads.  */
int ____num_tasks[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

// #ifdef __cplusplus
// extern "C" {
// #endif

//========== LIBRARIES ==========
#include <omp.h>     // omp_get_thread_num
#include <stdio.h>   // printf (previously pulled in only transitively)
#include <stdlib.h>  // (in directory known to compiler)

//========== COMMON / UTILITIES / HEADER ==========
#include "common.h"          // (in directory provided here)
#include "timer.h"           // get_time()
#include "./kernel_cpu_2.h"  // (in directory provided here)

/* kernel_cpu_2: B+Tree range-query kernel (OpenMP port of the GPU kernel).
 *
 * For each of `count` queries, descends `maxheight` levels of the tree kept
 * in `knodes`, tracking a cursor for the start key (currKnode/offset) and
 * one for the end key (lastKnode/offset_2), then scans the leaves to fill
 * recstart[bid] (index of the first matching record) and reclength[bid]
 * (number of records in the range).
 *
 * cores_arg is unused; it is kept for interface compatibility with callers.
 * knodes_elem bounds the child indices read from knodes (see guards below).
 */
void kernel_cpu_2(	int cores_arg,
					knode *knodes,
					long knodes_elem,
					int order,
					long maxheight,
					int count,
					long *currKnode,
					long *offset,
					long *lastKnode,
					long *offset_2,
					int *start,
					int *end,
					int *recstart,
					int *reclength)
{

	//====== Variables ======

	// timer
	long long time0;
	long long time1;
	long long time2;

	// common variables
	int i;

	time0 = get_time();

	//====== MCPU SETUP ======

	// emulate one GPU thread block: at most 1024 "threads" (keys) per node
	int threadsPerBlock;
	threadsPerBlock = order < 1024 ? order : 1024;

	{

	time1 = get_time();

	//====== PROCESS INTERACTIONS ======

	// private thread IDs
	int thid;
	int bid;

	// process number of querries
	#pragma omp parallel for private (i, thid)
	for(bid = 0; bid < count; bid++){
		____num_tasks[omp_get_thread_num()]++;
		{

		// process levels of the tree
		for(i = 0; i < maxheight; i++){

			// process all leaves at each level
			for(thid = 0; thid < threadsPerBlock; thid++){

				if((knodes[currKnode[bid]].keys[thid] <= start[bid]) && (knodes[currKnode[bid]].keys[thid+1] > start[bid])){
					// guard inserted to avoid a crash caused by a bug in the
					// original code: child indices stored in knodes->indices
					// by main can point outside knodes, so "offset[bid]" is
					// only updated when the index is in bounds
					if(knodes[currKnode[bid]].indices[thid] < knodes_elem){
						offset[bid] = knodes[currKnode[bid]].indices[thid];
					}
				}
				if((knodes[lastKnode[bid]].keys[thid] <= end[bid]) && (knodes[lastKnode[bid]].keys[thid+1] > end[bid])){
					// same out-of-bounds guard for the end-of-range cursor
					if(knodes[lastKnode[bid]].indices[thid] < knodes_elem){
						offset_2[bid] = knodes[lastKnode[bid]].indices[thid];
					}
				}

			}

			// set for next tree level
			currKnode[bid] = offset[bid];
			lastKnode[bid] = offset_2[bid];

		}

		// process leaves: find the index of the starting record
		for(thid = 0; thid < threadsPerBlock; thid++){
			if(knodes[currKnode[bid]].keys[thid] == start[bid]){
				recstart[bid] = knodes[currKnode[bid]].indices[thid];
			}
		}

		// process leaves: find the index of the ending record
		for(thid = 0; thid < threadsPerBlock; thid++){
			if(knodes[lastKnode[bid]].keys[thid] == end[bid]){
				reclength[bid] = knodes[lastKnode[bid]].indices[thid] - recstart[bid]+1;
			}
		}

		}
		;
	}

	time2 = get_time();

	}

	//====== DISPLAY TIMING ======
	// Fix: the percent signs in the two format strings below were a lone
	// '%' followed by a space, which is an invalid printf conversion
	// (undefined behavior); they are now correctly escaped as "%%".
	printf("Time spent in different stages of CPU/MCPU KERNEL:\n");
	printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time2-time0) * 100);
	printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time2-time0) * 100);
	printf("Total time:\n");
	printf("%.12f s\n", (float) (time2-time0) / 1000000);

}	// main

// #ifdef __cplusplus
// }
// #endif
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-4,8),ceild(4*t2-Nz-3,16));t3<=min(min(floord(4*Nt+Ny-9,16),floord(2*t1+Ny-3,16)),floord(4*t2+Ny-9,16));t3++) { for (t4=max(max(ceild(t1-124,128),ceild(4*t2-Nz-243,256)),ceild(16*t3-Ny-243,256));t4<=min(min(min(floord(4*Nt+Nx-9,256),floord(2*t1+Nx-3,256)),floord(4*t2+Nx-9,256)),floord(16*t3+Nx+3,256));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(256*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(256*t4,4*t5+4); ubv=min(256*t4+255,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
elastic-so12-mpi.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include "mpi.h" #include "omp.h" struct dataobj { void *restrict data; int * size; int * npsize; int * dsize; int * hsize; int * hofs; int * oofs; } ; struct neighborhood { int lll, llc, llr, lcl, lcc, lcr, lrl, lrc, lrr; int cll, clc, clr, ccl, ccc, ccr, crl, crc, crr; int rll, rlc, rlr, rcl, rcc, rcr, rrl, rrc, rrr; } ; struct profiler { double section0; double section1; double section2; double section3; } ; void sendrecv_txyz(struct dataobj *restrict a_vec, const int buf_x_size, const int buf_y_size, const int buf_z_size, int ogtime, int ogx, int ogy, int ogz, int ostime, int osx, int osy, int osz, int fromrank, int torank, MPI_Comm comm, const int nthreads); void gather_txyz(float *restrict buf_vec, const int buf_x_size, const int buf_y_size, const int buf_z_size, struct dataobj *restrict a_vec, int otime, int ox, int oy, int oz, const int nthreads); void scatter_txyz(float *restrict buf_vec, const int buf_x_size, const int buf_y_size, const int buf_z_size, struct dataobj *restrict a_vec, int otime, int ox, int oy, int oz, const int nthreads); void haloupdate_7(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads); void haloupdate_0(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads); void haloupdate_1(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads); void haloupdate_2(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads); void haloupdate_3(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads); void haloupdate_4(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads); void haloupdate_5(struct dataobj *restrict a_vec, MPI_Comm 
comm, struct neighborhood * nb, int otime, const int nthreads); void haloupdate_6(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads); void bf0(struct dataobj *restrict damp_vec, struct dataobj *restrict irho_vec, struct dataobj *restrict tau_xx_vec, struct dataobj *restrict tau_xy_vec, struct dataobj *restrict tau_xz_vec, struct dataobj *restrict tau_yy_vec, struct dataobj *restrict tau_yz_vec, struct dataobj *restrict tau_zz_vec, struct dataobj *restrict v_x_vec, struct dataobj *restrict v_y_vec, struct dataobj *restrict v_z_vec, const int t0, const int t1, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads); void bf1(struct dataobj *restrict damp_vec, struct dataobj *restrict lam_vec, struct dataobj *restrict mu_vec, struct dataobj *restrict tau_xx_vec, struct dataobj *restrict tau_xy_vec, struct dataobj *restrict tau_xz_vec, struct dataobj *restrict tau_yy_vec, struct dataobj *restrict tau_yz_vec, struct dataobj *restrict tau_zz_vec, struct dataobj *restrict v_x_vec, struct dataobj *restrict v_y_vec, struct dataobj *restrict v_z_vec, const int t0, const int t1, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads); int ForwardElastic(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict irho_vec, struct dataobj *restrict lam_vec, struct dataobj *restrict mu_vec, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec1_vec, struct dataobj *restrict rec1_coords_vec, struct dataobj *restrict rec2_vec, struct dataobj *restrict rec2_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict tau_xx_vec, struct dataobj *restrict tau_xy_vec, struct dataobj *restrict tau_xz_vec, struct dataobj *restrict tau_yy_vec, 
struct dataobj *restrict tau_yz_vec, struct dataobj *restrict tau_zz_vec, struct dataobj *restrict v_x_vec, struct dataobj *restrict v_y_vec, struct dataobj *restrict v_z_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec1_M, const int p_rec1_m, const int p_rec2_M, const int p_rec2_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers, const int x0_blk0_size, const int x1_blk0_size, const int y0_blk0_size, const int y1_blk0_size, MPI_Comm comm, struct neighborhood * nb, const int nthreads, const int nthreads_nonaffine) { float (*restrict rec1)[rec1_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec1_vec->size[1]]) rec1_vec->data; float (*restrict rec1_coords)[rec1_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec1_coords_vec->size[1]]) rec1_coords_vec->data; float (*restrict rec2)[rec2_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec2_vec->size[1]]) rec2_vec->data; float (*restrict rec2_coords)[rec2_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec2_coords_vec->size[1]]) rec2_coords_vec->data; float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data; float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data; float (*restrict tau_xx)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]]) tau_xx_vec->data; float (*restrict tau_yy)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]]) tau_yy_vec->data; float (*restrict tau_zz)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]] __attribute__ ((aligned (64))) = (float 
(*)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]]) tau_zz_vec->data; float (*restrict v_x)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]]) v_x_vec->data; float (*restrict v_y)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]]) v_y_vec->data; float (*restrict v_z)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]]) v_z_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); for (int time = time_m, t0 = (time)%(2), t1 = (time + 1)%(2); time <= time_M; time += 1, t0 = (time)%(2), t1 = (time + 1)%(2)) { struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); /* Begin section0 */ haloupdate_0(tau_xx_vec,comm,nb,t0,nthreads); haloupdate_1(tau_xy_vec,comm,nb,t0,nthreads); haloupdate_2(tau_xz_vec,comm,nb,t0,nthreads); haloupdate_3(tau_yy_vec,comm,nb,t0,nthreads); haloupdate_4(tau_yz_vec,comm,nb,t0,nthreads); haloupdate_5(tau_zz_vec,comm,nb,t0,nthreads); bf0(damp_vec,irho_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,x0_blk0_size,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,y0_blk0_size,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads); bf0(damp_vec,irho_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,x0_blk0_size,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,(y_M - y_m + 1)%(y0_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads); bf0(damp_vec,irho_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,(x_M - x_m + 1)%(x0_blk0_size),x_M,x_M - (x_M - x_m + 
1)%(x0_blk0_size) + 1,y0_blk0_size,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads); bf0(damp_vec,irho_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,(x_M - x_m + 1)%(x0_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,(y_M - y_m + 1)%(y0_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads); haloupdate_6(v_x_vec,comm,nb,t1,nthreads); haloupdate_6(v_y_vec,comm,nb,t1,nthreads); haloupdate_6(v_z_vec,comm,nb,t1,nthreads); bf1(damp_vec,lam_vec,mu_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,x1_blk0_size,x_M - (x_M - x_m + 1)%(x1_blk0_size),x_m,y1_blk0_size,y_M - (y_M - y_m + 1)%(y1_blk0_size),y_m,z_M,z_m,nthreads); bf1(damp_vec,lam_vec,mu_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,x1_blk0_size,x_M - (x_M - x_m + 1)%(x1_blk0_size),x_m,(y_M - y_m + 1)%(y1_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y1_blk0_size) + 1,z_M,z_m,nthreads); bf1(damp_vec,lam_vec,mu_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,(x_M - x_m + 1)%(x1_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x1_blk0_size) + 1,y1_blk0_size,y_M - (y_M - y_m + 1)%(y1_blk0_size),y_m,z_M,z_m,nthreads); bf1(damp_vec,lam_vec,mu_vec,tau_xx_vec,tau_xy_vec,tau_xz_vec,tau_yy_vec,tau_yz_vec,tau_zz_vec,v_x_vec,v_y_vec,v_z_vec,t0,t1,(x_M - x_m + 1)%(x1_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x1_blk0_size) + 1,(y_M - y_m + 1)%(y1_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y1_blk0_size) + 1,z_M,z_m,nthreads); /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000; struct timeval start_section1, end_section1; gettimeofday(&start_section1, NULL); /* Begin section1 */ #pragma omp parallel num_threads(nthreads_nonaffine) { int chunk_size = (int)(fmax(1, 
(1.0F/3.0F)*(p_src_M - p_src_m + 1)/nthreads_nonaffine)); #pragma omp for collapse(1) schedule(dynamic,chunk_size) for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1) { int ii_src_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])); int ii_src_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])); int ii_src_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])); int ii_src_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])) + 1; int ii_src_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])) + 1; int ii_src_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])) + 1; float px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*src_coords[p_src][0])) + src_coords[p_src][0]); float py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*src_coords[p_src][1])) + src_coords[p_src][1]); float pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*src_coords[p_src][2])) + src_coords[p_src][2]); if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1) { float r0 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src]; #pragma omp atomic update tau_xx[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r0; } if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1) { float r1 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src]; #pragma omp atomic update tau_xx[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r1; } if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r2 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src]; #pragma omp atomic update tau_xx[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r2; 
} if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r3 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src]; #pragma omp atomic update tau_xx[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r3; } if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r4 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src]; #pragma omp atomic update tau_xx[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r4; } if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r5 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src]; #pragma omp atomic update tau_xx[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r5; } if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1) { float r6 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src]; #pragma omp atomic update tau_xx[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r6; } if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1) { float r7 = 1.0e-3F*px*py*pz*dt*src[time][p_src]; #pragma omp atomic update tau_xx[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r7; } ii_src_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])); ii_src_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])); ii_src_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])); ii_src_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])) + 1; ii_src_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])) + 1; ii_src_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])) + 1; px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 
1.0e-1F*src_coords[p_src][0])) + src_coords[p_src][0]); py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*src_coords[p_src][1])) + src_coords[p_src][1]); pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*src_coords[p_src][2])) + src_coords[p_src][2]); if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1) { float r8 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src]; #pragma omp atomic update tau_zz[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r8; } if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1) { float r9 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src]; #pragma omp atomic update tau_zz[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r9; } if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r10 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src]; #pragma omp atomic update tau_zz[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r10; } if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r11 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src]; #pragma omp atomic update tau_zz[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r11; } if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r12 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src]; #pragma omp atomic update tau_zz[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r12; } if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && 
ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r13 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src]; #pragma omp atomic update tau_zz[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r13; } if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1) { float r14 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src]; #pragma omp atomic update tau_zz[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r14; } if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1) { float r15 = 1.0e-3F*px*py*pz*dt*src[time][p_src]; #pragma omp atomic update tau_zz[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r15; } ii_src_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])); ii_src_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])); ii_src_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])); ii_src_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])) + 1; ii_src_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])) + 1; ii_src_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])) + 1; px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*src_coords[p_src][0])) + src_coords[p_src][0]); py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*src_coords[p_src][1])) + src_coords[p_src][1]); pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*src_coords[p_src][2])) + src_coords[p_src][2]); if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1) { float r16 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src]; #pragma omp atomic update tau_yy[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r16; } if (ii_src_0 >= x_m - 1 && 
ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1) { float r17 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src]; #pragma omp atomic update tau_yy[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r17; } if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r18 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src]; #pragma omp atomic update tau_yy[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r18; } if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r19 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src]; #pragma omp atomic update tau_yy[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r19; } if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r20 = dt*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src]; #pragma omp atomic update tau_yy[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r20; } if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r21 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src]; #pragma omp atomic update tau_yy[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r21; } if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1) { float r22 = dt*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src]; #pragma omp atomic update tau_yy[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r22; } if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && 
ii_src_5 <= x_M + 1) { float r23 = 1.0e-3F*px*py*pz*dt*src[time][p_src]; #pragma omp atomic update tau_yy[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r23; } } } /* End section1 */ gettimeofday(&end_section1, NULL); timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000; struct timeval start_section2, end_section2; gettimeofday(&start_section2, NULL); /* Begin section2 */ #pragma omp parallel num_threads(nthreads_nonaffine) { int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(p_rec1_M - p_rec1_m + 1)/nthreads_nonaffine)); #pragma omp for collapse(1) schedule(dynamic,chunk_size) for (int p_rec1 = p_rec1_m; p_rec1 <= p_rec1_M; p_rec1 += 1) { int ii_rec1_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*rec1_coords[p_rec1][0])); int ii_rec1_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*rec1_coords[p_rec1][1])); int ii_rec1_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*rec1_coords[p_rec1][2])); int ii_rec1_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*rec1_coords[p_rec1][2])) + 1; int ii_rec1_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*rec1_coords[p_rec1][1])) + 1; int ii_rec1_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*rec1_coords[p_rec1][0])) + 1; float px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*rec1_coords[p_rec1][0])) + rec1_coords[p_rec1][0]); float py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*rec1_coords[p_rec1][1])) + rec1_coords[p_rec1][1]); float pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*rec1_coords[p_rec1][2])) + rec1_coords[p_rec1][2]); float sum = 0.0F; if (ii_rec1_0 >= x_m - 1 && ii_rec1_1 >= y_m - 1 && ii_rec1_2 >= z_m - 1 && ii_rec1_0 <= x_M + 1 && ii_rec1_1 <= y_M + 1 && ii_rec1_2 <= z_M + 1) { sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*tau_zz[t0][ii_rec1_0 + 12][ii_rec1_1 + 12][ii_rec1_2 + 12]; } if (ii_rec1_0 >= x_m - 1 && ii_rec1_1 >= y_m - 1 && ii_rec1_3 >= z_m - 1 && ii_rec1_0 <= x_M + 1 && 
ii_rec1_1 <= y_M + 1 && ii_rec1_3 <= z_M + 1) { sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*tau_zz[t0][ii_rec1_0 + 12][ii_rec1_1 + 12][ii_rec1_3 + 12]; } if (ii_rec1_0 >= x_m - 1 && ii_rec1_2 >= z_m - 1 && ii_rec1_4 >= y_m - 1 && ii_rec1_0 <= x_M + 1 && ii_rec1_2 <= z_M + 1 && ii_rec1_4 <= y_M + 1) { sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*tau_zz[t0][ii_rec1_0 + 12][ii_rec1_4 + 12][ii_rec1_2 + 12]; } if (ii_rec1_0 >= x_m - 1 && ii_rec1_3 >= z_m - 1 && ii_rec1_4 >= y_m - 1 && ii_rec1_0 <= x_M + 1 && ii_rec1_3 <= z_M + 1 && ii_rec1_4 <= y_M + 1) { sum += (-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*tau_zz[t0][ii_rec1_0 + 12][ii_rec1_4 + 12][ii_rec1_3 + 12]; } if (ii_rec1_1 >= y_m - 1 && ii_rec1_2 >= z_m - 1 && ii_rec1_5 >= x_m - 1 && ii_rec1_1 <= y_M + 1 && ii_rec1_2 <= z_M + 1 && ii_rec1_5 <= x_M + 1) { sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*tau_zz[t0][ii_rec1_5 + 12][ii_rec1_1 + 12][ii_rec1_2 + 12]; } if (ii_rec1_1 >= y_m - 1 && ii_rec1_3 >= z_m - 1 && ii_rec1_5 >= x_m - 1 && ii_rec1_1 <= y_M + 1 && ii_rec1_3 <= z_M + 1 && ii_rec1_5 <= x_M + 1) { sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*tau_zz[t0][ii_rec1_5 + 12][ii_rec1_1 + 12][ii_rec1_3 + 12]; } if (ii_rec1_2 >= z_m - 1 && ii_rec1_4 >= y_m - 1 && ii_rec1_5 >= x_m - 1 && ii_rec1_2 <= z_M + 1 && ii_rec1_4 <= y_M + 1 && ii_rec1_5 <= x_M + 1) { sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*tau_zz[t0][ii_rec1_5 + 12][ii_rec1_4 + 12][ii_rec1_2 + 12]; } if (ii_rec1_3 >= z_m - 1 && ii_rec1_4 >= y_m - 1 && ii_rec1_5 >= x_m - 1 && ii_rec1_3 <= z_M + 1 && ii_rec1_4 <= y_M + 1 && ii_rec1_5 <= x_M + 1) { sum += 1.0e-3F*px*py*pz*tau_zz[t0][ii_rec1_5 + 12][ii_rec1_4 + 12][ii_rec1_3 + 12]; } rec1[time][p_rec1] = sum; } } /* End section2 */ gettimeofday(&end_section2, NULL); timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000; struct timeval start_section3, end_section3; 
gettimeofday(&start_section3, NULL); /* Begin section3 */ haloupdate_7(v_x_vec,comm,nb,t0,nthreads); haloupdate_7(v_y_vec,comm,nb,t0,nthreads); haloupdate_7(v_z_vec,comm,nb,t0,nthreads); #pragma omp parallel num_threads(nthreads_nonaffine) { int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(p_rec2_M - p_rec2_m + 1)/nthreads_nonaffine)); #pragma omp for collapse(1) schedule(dynamic,chunk_size) for (int p_rec2 = p_rec2_m; p_rec2 <= p_rec2_M; p_rec2 += 1) { int ii_rec2_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*rec2_coords[p_rec2][0])); int ii_rec2_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*rec2_coords[p_rec2][1])); int ii_rec2_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*rec2_coords[p_rec2][2])); int ii_rec2_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*rec2_coords[p_rec2][2])) + 1; int ii_rec2_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*rec2_coords[p_rec2][1])) + 1; int ii_rec2_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*rec2_coords[p_rec2][0])) + 1; float px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*rec2_coords[p_rec2][0])) + rec2_coords[p_rec2][0]); float py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*rec2_coords[p_rec2][1])) + rec2_coords[p_rec2][1]); float pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*rec2_coords[p_rec2][2])) + rec2_coords[p_rec2][2]); float sum = 0.0F; if (ii_rec2_0 >= x_m - 1 && ii_rec2_1 >= y_m - 1 && ii_rec2_2 >= z_m - 1 && ii_rec2_0 <= x_M + 1 && ii_rec2_1 <= y_M + 1 && ii_rec2_2 <= z_M + 1) { sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*(1.80375183e-5F*v_x[t0][ii_rec2_0 + 6][ii_rec2_1 + 12][ii_rec2_2 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_0 + 7][ii_rec2_1 + 12][ii_rec2_2 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_0 + 8][ii_rec2_1 + 12][ii_rec2_2 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_0 + 9][ii_rec2_1 + 12][ii_rec2_2 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_0 + 10][ii_rec2_1 + 12][ii_rec2_2 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_0 + 11][ii_rec2_1 + 12][ii_rec2_2 + 12] + 
8.5714287e-2F*v_x[t0][ii_rec2_0 + 13][ii_rec2_1 + 12][ii_rec2_2 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_0 + 14][ii_rec2_1 + 12][ii_rec2_2 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_0 + 15][ii_rec2_1 + 12][ii_rec2_2 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_0 + 16][ii_rec2_1 + 12][ii_rec2_2 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_0 + 17][ii_rec2_1 + 12][ii_rec2_2 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_0 + 18][ii_rec2_1 + 12][ii_rec2_2 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 6][ii_rec2_2 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 7][ii_rec2_2 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 8][ii_rec2_2 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 9][ii_rec2_2 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 10][ii_rec2_2 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 11][ii_rec2_2 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 13][ii_rec2_2 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 14][ii_rec2_2 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 15][ii_rec2_2 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 16][ii_rec2_2 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 17][ii_rec2_2 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 18][ii_rec2_2 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_0 + 
12][ii_rec2_1 + 12][ii_rec2_2 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_2 + 18]); } if (ii_rec2_0 >= x_m - 1 && ii_rec2_1 >= y_m - 1 && ii_rec2_3 >= z_m - 1 && ii_rec2_0 <= x_M + 1 && ii_rec2_1 <= y_M + 1 && ii_rec2_3 <= z_M + 1) { sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*(1.80375183e-5F*v_x[t0][ii_rec2_0 + 6][ii_rec2_1 + 12][ii_rec2_3 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_0 + 7][ii_rec2_1 + 12][ii_rec2_3 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_0 + 8][ii_rec2_1 + 12][ii_rec2_3 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_0 + 9][ii_rec2_1 + 12][ii_rec2_3 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_0 + 10][ii_rec2_1 + 12][ii_rec2_3 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_0 + 11][ii_rec2_1 + 12][ii_rec2_3 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_0 + 13][ii_rec2_1 + 12][ii_rec2_3 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_0 + 14][ii_rec2_1 + 12][ii_rec2_3 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_0 + 15][ii_rec2_1 + 12][ii_rec2_3 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_0 + 16][ii_rec2_1 + 12][ii_rec2_3 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_0 + 17][ii_rec2_1 + 12][ii_rec2_3 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_0 + 18][ii_rec2_1 + 12][ii_rec2_3 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 6][ii_rec2_3 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 7][ii_rec2_3 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 8][ii_rec2_3 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 9][ii_rec2_3 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 10][ii_rec2_3 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 11][ii_rec2_3 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 13][ii_rec2_3 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 14][ii_rec2_3 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 15][ii_rec2_3 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 16][ii_rec2_3 + 12] + 
2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 17][ii_rec2_3 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_1 + 18][ii_rec2_3 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_1 + 12][ii_rec2_3 + 18]); } if (ii_rec2_0 >= x_m - 1 && ii_rec2_2 >= z_m - 1 && ii_rec2_4 >= y_m - 1 && ii_rec2_0 <= x_M + 1 && ii_rec2_2 <= z_M + 1 && ii_rec2_4 <= y_M + 1) { sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*(1.80375183e-5F*v_x[t0][ii_rec2_0 + 6][ii_rec2_4 + 12][ii_rec2_2 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_0 + 7][ii_rec2_4 + 12][ii_rec2_2 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_0 + 8][ii_rec2_4 + 12][ii_rec2_2 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_0 + 9][ii_rec2_4 + 12][ii_rec2_2 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_0 + 10][ii_rec2_4 + 12][ii_rec2_2 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_0 + 11][ii_rec2_4 + 12][ii_rec2_2 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_0 + 13][ii_rec2_4 + 12][ii_rec2_2 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_0 + 14][ii_rec2_4 + 12][ii_rec2_2 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_0 + 15][ii_rec2_4 + 12][ii_rec2_2 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_0 + 16][ii_rec2_4 + 12][ii_rec2_2 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_0 + 
17][ii_rec2_4 + 12][ii_rec2_2 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_0 + 18][ii_rec2_4 + 12][ii_rec2_2 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 6][ii_rec2_2 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 7][ii_rec2_2 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 8][ii_rec2_2 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 9][ii_rec2_2 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 10][ii_rec2_2 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 11][ii_rec2_2 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 13][ii_rec2_2 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 14][ii_rec2_2 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 15][ii_rec2_2 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 16][ii_rec2_2 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 17][ii_rec2_2 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 18][ii_rec2_2 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_2 + 18]); } if (ii_rec2_0 >= x_m - 1 && ii_rec2_3 >= z_m - 1 && ii_rec2_4 >= y_m - 1 && ii_rec2_0 <= x_M + 1 && ii_rec2_3 <= z_M + 1 && ii_rec2_4 <= y_M + 1) 
{ sum += (-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*(1.80375183e-5F*v_x[t0][ii_rec2_0 + 6][ii_rec2_4 + 12][ii_rec2_3 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_0 + 7][ii_rec2_4 + 12][ii_rec2_3 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_0 + 8][ii_rec2_4 + 12][ii_rec2_3 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_0 + 9][ii_rec2_4 + 12][ii_rec2_3 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_0 + 10][ii_rec2_4 + 12][ii_rec2_3 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_0 + 11][ii_rec2_4 + 12][ii_rec2_3 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_0 + 13][ii_rec2_4 + 12][ii_rec2_3 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_0 + 14][ii_rec2_4 + 12][ii_rec2_3 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_0 + 15][ii_rec2_4 + 12][ii_rec2_3 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_0 + 16][ii_rec2_4 + 12][ii_rec2_3 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_0 + 17][ii_rec2_4 + 12][ii_rec2_3 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_0 + 18][ii_rec2_4 + 12][ii_rec2_3 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 6][ii_rec2_3 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 7][ii_rec2_3 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 8][ii_rec2_3 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 9][ii_rec2_3 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 10][ii_rec2_3 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 11][ii_rec2_3 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 13][ii_rec2_3 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 14][ii_rec2_3 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 15][ii_rec2_3 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 16][ii_rec2_3 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 17][ii_rec2_3 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_0 + 12][ii_rec2_4 + 18][ii_rec2_3 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 8] 
- 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_0 + 12][ii_rec2_4 + 12][ii_rec2_3 + 18]); } if (ii_rec2_1 >= y_m - 1 && ii_rec2_2 >= z_m - 1 && ii_rec2_5 >= x_m - 1 && ii_rec2_1 <= y_M + 1 && ii_rec2_2 <= z_M + 1 && ii_rec2_5 <= x_M + 1) { sum += (1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*(1.80375183e-5F*v_x[t0][ii_rec2_5 + 6][ii_rec2_1 + 12][ii_rec2_2 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_5 + 7][ii_rec2_1 + 12][ii_rec2_2 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_5 + 8][ii_rec2_1 + 12][ii_rec2_2 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_5 + 9][ii_rec2_1 + 12][ii_rec2_2 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_5 + 10][ii_rec2_1 + 12][ii_rec2_2 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_5 + 11][ii_rec2_1 + 12][ii_rec2_2 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_5 + 13][ii_rec2_1 + 12][ii_rec2_2 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_5 + 14][ii_rec2_1 + 12][ii_rec2_2 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_5 + 15][ii_rec2_1 + 12][ii_rec2_2 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_5 + 16][ii_rec2_1 + 12][ii_rec2_2 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_5 + 17][ii_rec2_1 + 12][ii_rec2_2 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_5 + 18][ii_rec2_1 + 12][ii_rec2_2 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 6][ii_rec2_2 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 7][ii_rec2_2 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 8][ii_rec2_2 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_5 + 
12][ii_rec2_1 + 9][ii_rec2_2 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 10][ii_rec2_2 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 11][ii_rec2_2 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 13][ii_rec2_2 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 14][ii_rec2_2 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 15][ii_rec2_2 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 16][ii_rec2_2 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 17][ii_rec2_2 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 18][ii_rec2_2 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_2 + 18]); } if (ii_rec2_1 >= y_m - 1 && ii_rec2_3 >= z_m - 1 && ii_rec2_5 >= x_m - 1 && ii_rec2_1 <= y_M + 1 && ii_rec2_3 <= z_M + 1 && ii_rec2_5 <= x_M + 1) { sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*(1.80375183e-5F*v_x[t0][ii_rec2_5 + 6][ii_rec2_1 + 12][ii_rec2_3 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_5 + 7][ii_rec2_1 + 12][ii_rec2_3 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_5 + 8][ii_rec2_1 + 12][ii_rec2_3 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_5 + 9][ii_rec2_1 + 12][ii_rec2_3 + 12] + 
2.67857147e-2F*v_x[t0][ii_rec2_5 + 10][ii_rec2_1 + 12][ii_rec2_3 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_5 + 11][ii_rec2_1 + 12][ii_rec2_3 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_5 + 13][ii_rec2_1 + 12][ii_rec2_3 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_5 + 14][ii_rec2_1 + 12][ii_rec2_3 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_5 + 15][ii_rec2_1 + 12][ii_rec2_3 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_5 + 16][ii_rec2_1 + 12][ii_rec2_3 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_5 + 17][ii_rec2_1 + 12][ii_rec2_3 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_5 + 18][ii_rec2_1 + 12][ii_rec2_3 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 6][ii_rec2_3 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 7][ii_rec2_3 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 8][ii_rec2_3 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 9][ii_rec2_3 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 10][ii_rec2_3 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 11][ii_rec2_3 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 13][ii_rec2_3 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 14][ii_rec2_3 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 15][ii_rec2_3 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 16][ii_rec2_3 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 17][ii_rec2_3 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_1 + 18][ii_rec2_3 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_5 + 
12][ii_rec2_1 + 12][ii_rec2_3 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_1 + 12][ii_rec2_3 + 18]); } if (ii_rec2_2 >= z_m - 1 && ii_rec2_4 >= y_m - 1 && ii_rec2_5 >= x_m - 1 && ii_rec2_2 <= z_M + 1 && ii_rec2_4 <= y_M + 1 && ii_rec2_5 <= x_M + 1) { sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*(1.80375183e-5F*v_x[t0][ii_rec2_5 + 6][ii_rec2_4 + 12][ii_rec2_2 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_5 + 7][ii_rec2_4 + 12][ii_rec2_2 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_5 + 8][ii_rec2_4 + 12][ii_rec2_2 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_5 + 9][ii_rec2_4 + 12][ii_rec2_2 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_5 + 10][ii_rec2_4 + 12][ii_rec2_2 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_5 + 11][ii_rec2_4 + 12][ii_rec2_2 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_5 + 13][ii_rec2_4 + 12][ii_rec2_2 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_5 + 14][ii_rec2_4 + 12][ii_rec2_2 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_5 + 15][ii_rec2_4 + 12][ii_rec2_2 + 12] - 1.78571431e-3F*v_x[t0][ii_rec2_5 + 16][ii_rec2_4 + 12][ii_rec2_2 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_5 + 17][ii_rec2_4 + 12][ii_rec2_2 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_5 + 18][ii_rec2_4 + 12][ii_rec2_2 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 6][ii_rec2_2 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 7][ii_rec2_2 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 8][ii_rec2_2 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 9][ii_rec2_2 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 10][ii_rec2_2 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 11][ii_rec2_2 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 13][ii_rec2_2 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 14][ii_rec2_2 + 12] + 
7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 15][ii_rec2_2 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 16][ii_rec2_2 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 17][ii_rec2_2 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 18][ii_rec2_2 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_2 + 18]); } if (ii_rec2_3 >= z_m - 1 && ii_rec2_4 >= y_m - 1 && ii_rec2_5 >= x_m - 1 && ii_rec2_3 <= z_M + 1 && ii_rec2_4 <= y_M + 1 && ii_rec2_5 <= x_M + 1) { sum += 1.0e-3F*px*py*pz*(1.80375183e-5F*v_x[t0][ii_rec2_5 + 6][ii_rec2_4 + 12][ii_rec2_3 + 12] - 2.59740264e-4F*v_x[t0][ii_rec2_5 + 7][ii_rec2_4 + 12][ii_rec2_3 + 12] + 1.78571431e-3F*v_x[t0][ii_rec2_5 + 8][ii_rec2_4 + 12][ii_rec2_3 + 12] - 7.93650805e-3F*v_x[t0][ii_rec2_5 + 9][ii_rec2_4 + 12][ii_rec2_3 + 12] + 2.67857147e-2F*v_x[t0][ii_rec2_5 + 10][ii_rec2_4 + 12][ii_rec2_3 + 12] - 8.5714287e-2F*v_x[t0][ii_rec2_5 + 11][ii_rec2_4 + 12][ii_rec2_3 + 12] + 8.5714287e-2F*v_x[t0][ii_rec2_5 + 13][ii_rec2_4 + 12][ii_rec2_3 + 12] - 2.67857147e-2F*v_x[t0][ii_rec2_5 + 14][ii_rec2_4 + 12][ii_rec2_3 + 12] + 7.93650805e-3F*v_x[t0][ii_rec2_5 + 15][ii_rec2_4 + 12][ii_rec2_3 + 12] - 
1.78571431e-3F*v_x[t0][ii_rec2_5 + 16][ii_rec2_4 + 12][ii_rec2_3 + 12] + 2.59740264e-4F*v_x[t0][ii_rec2_5 + 17][ii_rec2_4 + 12][ii_rec2_3 + 12] - 1.80375183e-5F*v_x[t0][ii_rec2_5 + 18][ii_rec2_4 + 12][ii_rec2_3 + 12] + 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 6][ii_rec2_3 + 12] - 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 7][ii_rec2_3 + 12] + 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 8][ii_rec2_3 + 12] - 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 9][ii_rec2_3 + 12] + 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 10][ii_rec2_3 + 12] - 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 11][ii_rec2_3 + 12] + 8.5714287e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 13][ii_rec2_3 + 12] - 2.67857147e-2F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 14][ii_rec2_3 + 12] + 7.93650805e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 15][ii_rec2_3 + 12] - 1.78571431e-3F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 16][ii_rec2_3 + 12] + 2.59740264e-4F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 17][ii_rec2_3 + 12] - 1.80375183e-5F*v_y[t0][ii_rec2_5 + 12][ii_rec2_4 + 18][ii_rec2_3 + 12] + 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 6] - 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 7] + 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 8] - 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 9] + 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 10] - 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 11] + 8.5714287e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 13] - 2.67857147e-2F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 14] + 7.93650805e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 15] - 1.78571431e-3F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 16] + 2.59740264e-4F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 17] - 1.80375183e-5F*v_z[t0][ii_rec2_5 + 12][ii_rec2_4 + 12][ii_rec2_3 + 18]); } rec2[time][p_rec2] = sum; } } /* End 
section3 */ gettimeofday(&end_section3, NULL); timers->section3 += (double)(end_section3.tv_sec-start_section3.tv_sec)+(double)(end_section3.tv_usec-start_section3.tv_usec)/1000000; } return 0; }
/* NOTE(review): the statements above close the (only partially visible) time-stepping
   routine: they stop the section3 stopwatch and accumulate its wall-clock seconds. */

/* Exchange one halo surface of a_vec with a neighbor pair: post a receive from
   `fromrank`, pack the outgoing surface with gather_txyz, send it to `torank`
   (message tag 13), then unpack the received surface with scatter_txyz.
   (ogtime, ogx, ogy, ogz) / (ostime, osx, osy, osz) are the gather / scatter
   origin offsets into a_vec.
   NOTE(review): both posix_memalign return values are ignored — on allocation
   failure the buffer pointers are left unset before the MPI calls; presumably
   tolerated in this generated code, but worth confirming. */
void sendrecv_txyz(struct dataobj *restrict a_vec, const int buf_x_size, const int buf_y_size, const int buf_z_size, int ogtime, int ogx, int ogy, int ogz, int ostime, int osx, int osy, int osz, int fromrank, int torank, MPI_Comm comm, const int nthreads)
{
  /* 64-byte aligned staging buffers: bufs = incoming (scattered), bufg = outgoing (gathered). */
  float (*bufs)[buf_y_size][buf_z_size];
  posix_memalign((void**)&bufs, 64, sizeof(float[buf_x_size][buf_y_size][buf_z_size]));
  float (*bufg)[buf_y_size][buf_z_size];
  posix_memalign((void**)&bufg, 64, sizeof(float[buf_x_size][buf_y_size][buf_z_size]));
  MPI_Request rrecv;
  MPI_Request rsend;
  MPI_Irecv((float *)bufs,buf_x_size*buf_y_size*buf_z_size,MPI_FLOAT,fromrank,13,comm,&rrecv);
  if (torank != MPI_PROC_NULL)
  {
    gather_txyz((float *)bufg,buf_x_size,buf_y_size,buf_z_size,a_vec,ogtime,ogx,ogy,ogz,nthreads);
  }
  /* Sending unconditionally is safe: a send to MPI_PROC_NULL is a no-op, so the
     (then un-gathered) bufg contents are never transmitted in that case. */
  MPI_Isend((float *)bufg,buf_x_size*buf_y_size*buf_z_size,MPI_FLOAT,torank,13,comm,&rsend);
  MPI_Wait(&rsend,MPI_STATUS_IGNORE);
  MPI_Wait(&rrecv,MPI_STATUS_IGNORE);
  if (fromrank != MPI_PROC_NULL)
  {
    scatter_txyz((float *)bufs,buf_x_size,buf_y_size,buf_z_size,a_vec,ostime,osx,osy,osz,nthreads);
  }
  free(bufs);
  free(bufg);
}

/* Pack the box of a_vec starting at (otime, ox, oy, oz) into the dense buffer
   buf_vec (OpenMP-parallel over x, SIMD over the innermost z axis). */
void gather_txyz(float *restrict buf_vec, const int buf_x_size, const int buf_y_size, const int buf_z_size, struct dataobj *restrict a_vec, int otime, int ox, int oy, int oz, const int nthreads)
{
  float (*restrict buf)[buf_y_size][buf_z_size] __attribute__ ((aligned (64))) = (float (*)[buf_y_size][buf_z_size]) buf_vec;
  float (*restrict a)[a_vec->size[1]][a_vec->size[2]][a_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[a_vec->size[1]][a_vec->size[2]][a_vec->size[3]]) a_vec->data;
  #pragma omp parallel num_threads(nthreads)
  {
    #pragma omp for collapse(1) schedule(dynamic,1)
    for (int x = 0; x <= buf_x_size - 1; x += 1)
    {
      for (int y = 0; y <= buf_y_size - 1; y += 1)
      {
        #pragma omp simd aligned(a:32)
        for (int z = 0; z <= buf_z_size - 1; z += 1)
        {
          buf[x][y][z] = a[otime][x + ox][y + oy][z + oz];
        }
      }
    }
  }
}

/* Inverse of gather_txyz: unpack buf_vec into the box of a_vec at (otime, ox, oy, oz). */
void scatter_txyz(float *restrict buf_vec, const int buf_x_size, const int buf_y_size, const int buf_z_size, struct dataobj *restrict a_vec, int otime, int ox, int oy, int oz, const int nthreads)
{
  float (*restrict buf)[buf_y_size][buf_z_size] __attribute__ ((aligned (64))) = (float (*)[buf_y_size][buf_z_size]) buf_vec;
  float (*restrict a)[a_vec->size[1]][a_vec->size[2]][a_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[a_vec->size[1]][a_vec->size[2]][a_vec->size[3]]) a_vec->data;
  #pragma omp parallel num_threads(nthreads)
  {
    #pragma omp for collapse(1) schedule(dynamic,1)
    for (int x = 0; x <= buf_x_size - 1; x += 1)
    {
      for (int y = 0; y <= buf_y_size - 1; y += 1)
      {
        #pragma omp simd aligned(a:32)
        for (int z = 0; z <= buf_z_size - 1; z += 1)
        {
          a[otime][x + ox][y + oy][z + oz] = buf[x][y][z];
        }
      }
    }
  }
}

/* Full halo update for a_vec: one sendrecv_txyz per face.  The nb->{r|l|c}{r|l|c}{r|l|c}
   fields presumably encode the neighbor ranks per axis (right/left/center) — TODO confirm
   against the neighborhood struct declared earlier in the file. */
void haloupdate_7(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads)
{
  /* First axis: receive from rcc while sending to lcc, then the mirror exchange. */
  sendrecv_txyz(a_vec,a_vec->hsize[3],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[2],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[3],a_vec->hofs[4],a_vec->hofs[6],nb->rcc,nb->lcc,comm,nthreads);
  sendrecv_txyz(a_vec,a_vec->hsize[2],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[3],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->lcc,nb->rcc,comm,nthreads);
  /* Second axis: crc/clc neighbor pair. */
  sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[5],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[5],a_vec->hofs[6],nb->crc,nb->clc,comm,nthreads);
  sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[4],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[5],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->clc,nb->crc,comm,nthreads);
/* Third-axis pair of haloupdate_7 (ccr/ccl neighbors) and its closing brace,
   followed by haloupdate_0 (first axis only) and haloupdate_1 (first and
   second axes) — reduced variants that refresh only a subset of the faces. */
sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->npsize[2],a_vec->hsize[7],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->oofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[7],nb->ccr,nb->ccl,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->npsize[2],a_vec->hsize[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->oofs[7],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->ccl,nb->ccr,comm,nthreads); } void haloupdate_0(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads) { sendrecv_txyz(a_vec,a_vec->hsize[3],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[2],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[3],a_vec->hofs[4],a_vec->hofs[6],nb->rcc,nb->lcc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->hsize[2],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[3],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->lcc,nb->rcc,comm,nthreads); } void haloupdate_1(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads) { sendrecv_txyz(a_vec,a_vec->hsize[3],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[2],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[3],a_vec->hofs[4],a_vec->hofs[6],nb->rcc,nb->lcc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->hsize[2],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[3],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->lcc,nb->rcc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[5],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[5],a_vec->hofs[6],nb->crc,nb->clc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[4],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[5],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->clc,nb->crc,comm,nthreads); } void haloupdate_2(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int
/* haloupdate_2 (signature continues): first and third axes only; then
   haloupdate_3 (second axis only) and the start of haloupdate_4. */
nthreads) { sendrecv_txyz(a_vec,a_vec->hsize[3],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[2],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[3],a_vec->hofs[4],a_vec->hofs[6],nb->rcc,nb->lcc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->hsize[2],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[3],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->lcc,nb->rcc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->npsize[2],a_vec->hsize[7],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->oofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[7],nb->ccr,nb->ccl,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->npsize[2],a_vec->hsize[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->oofs[7],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->ccl,nb->ccr,comm,nthreads); } void haloupdate_3(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads) { sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[5],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[5],a_vec->hofs[6],nb->crc,nb->clc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[4],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[5],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->clc,nb->crc,comm,nthreads); } void haloupdate_4(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads) { sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[5],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[5],a_vec->hofs[6],nb->crc,nb->clc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[4],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[5],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->clc,nb->crc,comm,nthreads);
/* Third-axis pair of haloupdate_4 and its closing brace; then haloupdate_5
   (all three axes, same call sequence as haloupdate_7) and the opening of
   haloupdate_6, whose body continues on the next line. */
sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->npsize[2],a_vec->hsize[7],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->oofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[7],nb->ccr,nb->ccl,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->npsize[2],a_vec->hsize[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->oofs[7],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->ccl,nb->ccr,comm,nthreads); } void haloupdate_5(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads) { sendrecv_txyz(a_vec,a_vec->hsize[3],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[2],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[3],a_vec->hofs[4],a_vec->hofs[6],nb->rcc,nb->lcc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->hsize[2],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[3],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->lcc,nb->rcc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[5],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[5],a_vec->hofs[6],nb->crc,nb->clc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[4],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[5],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->clc,nb->crc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->npsize[2],a_vec->hsize[7],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->oofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[7],nb->ccr,nb->ccl,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->npsize[2],a_vec->hsize[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->oofs[7],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->ccl,nb->ccr,comm,nthreads); } void haloupdate_6(struct dataobj *restrict a_vec, MPI_Comm comm, struct neighborhood * nb, int otime, const int nthreads) {
/* Body of haloupdate_6: all six face exchanges (same sequence as haloupdate_5/7)
   and its closing brace.  Then bf0: the cache-blocked update of the velocity
   fields v_x/v_y/v_z at time level t1 from the six tau_* stress fields at t0,
   via high-order staggered finite differences scaled by irho (presumably an
   inverse-density/buoyancy field — TODO confirm) and the `damp` mask.
   Auto-generated code: do not hand-edit the coefficients. */
sendrecv_txyz(a_vec,a_vec->hsize[3],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[2],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[3],a_vec->hofs[4],a_vec->hofs[6],nb->rcc,nb->lcc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->hsize[2],a_vec->npsize[2],a_vec->npsize[3],otime,a_vec->oofs[3],a_vec->hofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->lcc,nb->rcc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[5],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[4],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[5],a_vec->hofs[6],nb->crc,nb->clc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->hsize[4],a_vec->npsize[3],otime,a_vec->hofs[2],a_vec->oofs[5],a_vec->hofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->clc,nb->crc,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->npsize[2],a_vec->hsize[7],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->oofs[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[7],nb->ccr,nb->ccl,comm,nthreads); sendrecv_txyz(a_vec,a_vec->npsize[1],a_vec->npsize[2],a_vec->hsize[6],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->oofs[7],otime,a_vec->hofs[2],a_vec->hofs[4],a_vec->hofs[6],nb->ccl,nb->ccr,comm,nthreads); } void bf0(struct dataobj *restrict damp_vec, struct dataobj *restrict irho_vec, struct dataobj *restrict tau_xx_vec, struct dataobj *restrict tau_xy_vec, struct dataobj *restrict tau_xz_vec, struct dataobj *restrict tau_yy_vec, struct dataobj *restrict tau_yz_vec, struct dataobj *restrict tau_zz_vec, struct dataobj *restrict v_x_vec, struct dataobj *restrict v_y_vec, struct dataobj *restrict v_z_vec, const int t0, const int t1, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads) { float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data; float (*restrict
/* (cast block continues) each dataobj payload is reinterpreted as a
   multidimensional VLA-typed array over the sizes stored in its descriptor. */
irho)[irho_vec->size[1]][irho_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[irho_vec->size[1]][irho_vec->size[2]]) irho_vec->data; float (*restrict tau_xx)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]]) tau_xx_vec->data; float (*restrict tau_xy)[tau_xy_vec->size[1]][tau_xy_vec->size[2]][tau_xy_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xy_vec->size[1]][tau_xy_vec->size[2]][tau_xy_vec->size[3]]) tau_xy_vec->data; float (*restrict tau_xz)[tau_xz_vec->size[1]][tau_xz_vec->size[2]][tau_xz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xz_vec->size[1]][tau_xz_vec->size[2]][tau_xz_vec->size[3]]) tau_xz_vec->data; float (*restrict tau_yy)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]]) tau_yy_vec->data; float (*restrict tau_yz)[tau_yz_vec->size[1]][tau_yz_vec->size[2]][tau_yz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_yz_vec->size[1]][tau_yz_vec->size[2]][tau_yz_vec->size[3]]) tau_yz_vec->data; float (*restrict tau_zz)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]]) tau_zz_vec->data; float (*restrict v_x)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]]) v_x_vec->data; float (*restrict v_y)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]]) v_y_vec->data; float (*restrict v_z)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]]) v_z_vec->data; if
/* Guard against a degenerate block size, then x/y cache-blocked loops with an
   inner SIMD z loop; the v_x stencil update begins and continues on the next line. */
(x0_blk0_size == 0) { return; } #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(1) schedule(dynamic,1) for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size) { for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= x0_blk0 + x0_blk0_size - 1; x += 1) { for (int y = y0_blk0; y <= y0_blk0 + y0_blk0_size - 1; y += 1) { #pragma omp simd aligned(damp,irho,tau_xx,tau_xy,tau_xz,tau_yy,tau_yz,tau_zz,v_x,v_y,v_z:32) for (int z = z_m; z <= z_M; z += 1) { v_x[t1][x + 12][y + 12][z + 12] = 7.00999975204468e-1F*(irho[x + 12][y + 12][z + 12] + irho[x + 13][y + 12][z + 12])*(2.18478119e-6F*(tau_xx[t0][x + 7][y + 12][z + 12] - tau_xx[t0][x + 18][y + 12][z + 12] + tau_xy[t0][x + 12][y + 6][z + 12] - tau_xy[t0][x + 12][y + 17][z + 12] + tau_xz[t0][x + 12][y + 12][z + 6] - tau_xz[t0][x + 12][y + 12][z + 17]) + 3.59005404e-5F*(-tau_xx[t0][x + 8][y + 12][z + 12] + tau_xx[t0][x + 17][y + 12][z + 12] - tau_xy[t0][x + 12][y + 7][z + 12] + tau_xy[t0][x + 12][y + 16][z + 12] - tau_xz[t0][x + 12][y + 12][z + 7] + tau_xz[t0][x + 12][y + 12][z + 16]) + 2.96728956e-4F*(tau_xx[t0][x + 9][y + 12][z + 12] - tau_xx[t0][x + 16][y + 12][z + 12] + tau_xy[t0][x + 12][y + 8][z + 12] - tau_xy[t0][x + 12][y + 15][z + 12] + tau_xz[t0][x + 12][y + 12][z + 8] - tau_xz[t0][x + 12][y + 12][z + 15]) + 1.74476626e-3F*(-tau_xx[t0][x + 10][y + 12][z + 12] + tau_xx[t0][x + 15][y + 12][z + 12] - tau_xy[t0][x + 12][y + 9][z + 12] + tau_xy[t0][x + 12][y + 14][z + 12] - tau_xz[t0][x + 12][y + 12][z + 9] + tau_xz[t0][x + 12][y + 12][z + 14]) + 9.6931459e-3F*(tau_xx[t0][x + 11][y + 12][z + 12] - tau_xx[t0][x + 14][y + 12][z + 12] + tau_xy[t0][x + 12][y + 10][z + 12] - tau_xy[t0][x + 12][y + 13][z + 12] + tau_xz[t0][x + 12][y + 12][z + 10] - tau_xz[t0][x + 12][y + 12][z + 13]) + 1.22133638e-1F*(-tau_xx[t0][x + 12][y + 12][z + 12] + tau_xx[t0][x + 13][y + 12][z + 12] - tau_xy[t0][x + 12][y + 11][z + 12] + tau_xy[t0][x + 12][y + 12][z + 12] -
/* (v_x update concludes; then the analogous v_y update, and the v_z update
   begins — it continues on the next line) */
tau_xz[t0][x + 12][y + 12][z + 11] + tau_xz[t0][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*v_x[t0][x + 12][y + 12][z + 12]; v_y[t1][x + 12][y + 12][z + 12] = 7.00999975204468e-1F*(irho[x + 12][y + 12][z + 12] + irho[x + 12][y + 13][z + 12])*(2.18478119e-6F*(tau_xy[t0][x + 6][y + 12][z + 12] - tau_xy[t0][x + 17][y + 12][z + 12] + tau_yy[t0][x + 12][y + 7][z + 12] - tau_yy[t0][x + 12][y + 18][z + 12] + tau_yz[t0][x + 12][y + 12][z + 6] - tau_yz[t0][x + 12][y + 12][z + 17]) + 3.59005404e-5F*(-tau_xy[t0][x + 7][y + 12][z + 12] + tau_xy[t0][x + 16][y + 12][z + 12] - tau_yy[t0][x + 12][y + 8][z + 12] + tau_yy[t0][x + 12][y + 17][z + 12] - tau_yz[t0][x + 12][y + 12][z + 7] + tau_yz[t0][x + 12][y + 12][z + 16]) + 2.96728956e-4F*(tau_xy[t0][x + 8][y + 12][z + 12] - tau_xy[t0][x + 15][y + 12][z + 12] + tau_yy[t0][x + 12][y + 9][z + 12] - tau_yy[t0][x + 12][y + 16][z + 12] + tau_yz[t0][x + 12][y + 12][z + 8] - tau_yz[t0][x + 12][y + 12][z + 15]) + 1.74476626e-3F*(-tau_xy[t0][x + 9][y + 12][z + 12] + tau_xy[t0][x + 14][y + 12][z + 12] - tau_yy[t0][x + 12][y + 10][z + 12] + tau_yy[t0][x + 12][y + 15][z + 12] - tau_yz[t0][x + 12][y + 12][z + 9] + tau_yz[t0][x + 12][y + 12][z + 14]) + 9.6931459e-3F*(tau_xy[t0][x + 10][y + 12][z + 12] - tau_xy[t0][x + 13][y + 12][z + 12] + tau_yy[t0][x + 12][y + 11][z + 12] - tau_yy[t0][x + 12][y + 14][z + 12] + tau_yz[t0][x + 12][y + 12][z + 10] - tau_yz[t0][x + 12][y + 12][z + 13]) + 1.22133638e-1F*(-tau_xy[t0][x + 11][y + 12][z + 12] + tau_xy[t0][x + 12][y + 12][z + 12] - tau_yy[t0][x + 12][y + 12][z + 12] + tau_yy[t0][x + 12][y + 13][z + 12] - tau_yz[t0][x + 12][y + 12][z + 11] + tau_yz[t0][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*v_y[t0][x + 12][y + 12][z + 12]; v_z[t1][x + 12][y + 12][z + 12] = 7.00999975204468e-1F*(irho[x + 12][y + 12][z + 12] + irho[x + 12][y + 12][z + 13])*(2.18478119e-6F*(tau_xz[t0][x + 6][y + 12][z + 12] - tau_xz[t0][x + 17][y + 12][z + 12] +
/* (v_z update concludes; the blocked loop nest and bf0 close, and bf1's
   signature begins — it continues on the next line) */
tau_yz[t0][x + 12][y + 6][z + 12] - tau_yz[t0][x + 12][y + 17][z + 12] + tau_zz[t0][x + 12][y + 12][z + 7] - tau_zz[t0][x + 12][y + 12][z + 18]) + 3.59005404e-5F*(-tau_xz[t0][x + 7][y + 12][z + 12] + tau_xz[t0][x + 16][y + 12][z + 12] - tau_yz[t0][x + 12][y + 7][z + 12] + tau_yz[t0][x + 12][y + 16][z + 12] - tau_zz[t0][x + 12][y + 12][z + 8] + tau_zz[t0][x + 12][y + 12][z + 17]) + 2.96728956e-4F*(tau_xz[t0][x + 8][y + 12][z + 12] - tau_xz[t0][x + 15][y + 12][z + 12] + tau_yz[t0][x + 12][y + 8][z + 12] - tau_yz[t0][x + 12][y + 15][z + 12] + tau_zz[t0][x + 12][y + 12][z + 9] - tau_zz[t0][x + 12][y + 12][z + 16]) + 1.74476626e-3F*(-tau_xz[t0][x + 9][y + 12][z + 12] + tau_xz[t0][x + 14][y + 12][z + 12] - tau_yz[t0][x + 12][y + 9][z + 12] + tau_yz[t0][x + 12][y + 14][z + 12] - tau_zz[t0][x + 12][y + 12][z + 10] + tau_zz[t0][x + 12][y + 12][z + 15]) + 9.6931459e-3F*(tau_xz[t0][x + 10][y + 12][z + 12] - tau_xz[t0][x + 13][y + 12][z + 12] + tau_yz[t0][x + 12][y + 10][z + 12] - tau_yz[t0][x + 12][y + 13][z + 12] + tau_zz[t0][x + 12][y + 12][z + 11] - tau_zz[t0][x + 12][y + 12][z + 14]) + 1.22133638e-1F*(-tau_xz[t0][x + 11][y + 12][z + 12] + tau_xz[t0][x + 12][y + 12][z + 12] - tau_yz[t0][x + 12][y + 11][z + 12] + tau_yz[t0][x + 12][y + 12][z + 12] - tau_zz[t0][x + 12][y + 12][z + 12] + tau_zz[t0][x + 12][y + 12][z + 13]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*v_z[t0][x + 12][y + 12][z + 12]; } } } } } } } void bf1(struct dataobj *restrict damp_vec, struct dataobj *restrict lam_vec, struct dataobj *restrict mu_vec, struct dataobj *restrict tau_xx_vec, struct dataobj *restrict tau_xy_vec, struct dataobj *restrict tau_xz_vec, struct dataobj *restrict tau_yy_vec, struct dataobj *restrict tau_yz_vec, struct dataobj *restrict tau_zz_vec, struct dataobj *restrict v_x_vec, struct dataobj *restrict v_y_vec, struct dataobj *restrict v_z_vec, const int t0, const int t1, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const
/* (bf1 signature continues) bf1 updates the six stress fields tau_* at t1 from
   the just-computed velocities at t1, using the lam/mu coefficient fields
   (presumably Lamé parameters — TODO confirm) and the same damp mask and
   cache-blocked loop structure as bf0.  Auto-generated: do not hand-edit. */
int y_m, const int z_M, const int z_m, const int nthreads) { float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data; float (*restrict lam)[lam_vec->size[1]][lam_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[lam_vec->size[1]][lam_vec->size[2]]) lam_vec->data; float (*restrict mu)[mu_vec->size[1]][mu_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[mu_vec->size[1]][mu_vec->size[2]]) mu_vec->data; float (*restrict tau_xx)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xx_vec->size[1]][tau_xx_vec->size[2]][tau_xx_vec->size[3]]) tau_xx_vec->data; float (*restrict tau_xy)[tau_xy_vec->size[1]][tau_xy_vec->size[2]][tau_xy_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xy_vec->size[1]][tau_xy_vec->size[2]][tau_xy_vec->size[3]]) tau_xy_vec->data; float (*restrict tau_xz)[tau_xz_vec->size[1]][tau_xz_vec->size[2]][tau_xz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_xz_vec->size[1]][tau_xz_vec->size[2]][tau_xz_vec->size[3]]) tau_xz_vec->data; float (*restrict tau_yy)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_yy_vec->size[1]][tau_yy_vec->size[2]][tau_yy_vec->size[3]]) tau_yy_vec->data; float (*restrict tau_yz)[tau_yz_vec->size[1]][tau_yz_vec->size[2]][tau_yz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_yz_vec->size[1]][tau_yz_vec->size[2]][tau_yz_vec->size[3]]) tau_yz_vec->data; float (*restrict tau_zz)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[tau_zz_vec->size[1]][tau_zz_vec->size[2]][tau_zz_vec->size[3]]) tau_zz_vec->data; float (*restrict v_x)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_x_vec->size[1]][v_x_vec->size[2]][v_x_vec->size[3]]) v_x_vec->data;
/* Remaining casts, the degenerate-block guard, the blocked loop nest, and the
   r50..r70 temporaries: negated velocity samples shared by several stencils
   below; r49 (started here, concluded on the next line) is the lam-weighted
   term common to the three diagonal (tau_xx/tau_yy/tau_zz) updates. */
float (*restrict v_y)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_y_vec->size[1]][v_y_vec->size[2]][v_y_vec->size[3]]) v_y_vec->data; float (*restrict v_z)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_z_vec->size[1]][v_z_vec->size[2]][v_z_vec->size[3]]) v_z_vec->data; if (x1_blk0_size == 0) { return; } #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(1) schedule(dynamic,1) for (int x1_blk0 = x_m; x1_blk0 <= x_M; x1_blk0 += x1_blk0_size) { for (int y1_blk0 = y_m; y1_blk0 <= y_M; y1_blk0 += y1_blk0_size) { for (int x = x1_blk0; x <= x1_blk0 + x1_blk0_size - 1; x += 1) { for (int y = y1_blk0; y <= y1_blk0 + y1_blk0_size - 1; y += 1) { #pragma omp simd aligned(damp,lam,mu,tau_xx,tau_xy,tau_xz,tau_yy,tau_yz,tau_zz,v_x,v_y,v_z:32) for (int z = z_m; z <= z_M; z += 1) { float r70 = -v_z[t1][x + 12][y + 12][z + 12]; float r69 = -v_y[t1][x + 12][y + 12][z + 12]; float r68 = -v_x[t1][x + 12][y + 12][z + 12]; float r67 = -v_z[t1][x + 12][y + 12][z + 11]; float r66 = -v_y[t1][x + 12][y + 11][z + 12]; float r65 = -v_x[t1][x + 11][y + 12][z + 12]; float r64 = -v_z[t1][x + 12][y + 12][z + 9]; float r63 = -v_y[t1][x + 12][y + 9][z + 12]; float r62 = -v_x[t1][x + 9][y + 12][z + 12]; float r61 = -v_z[t1][x + 12][y + 12][z + 13]; float r60 = -v_y[t1][x + 12][y + 13][z + 12]; float r59 = -v_x[t1][x + 13][y + 12][z + 12]; float r58 = -v_z[t1][x + 12][y + 12][z + 15]; float r57 = -v_y[t1][x + 12][y + 15][z + 12]; float r56 = -v_x[t1][x + 15][y + 12][z + 12]; float r55 = -v_z[t1][x + 12][y + 12][z + 17]; float r54 = -v_y[t1][x + 12][y + 17][z + 12]; float r53 = -v_x[t1][x + 17][y + 12][z + 12]; float r52 = -v_z[t1][x + 12][y + 12][z + 7]; float r51 = -v_y[t1][x + 12][y + 7][z + 12]; float r50 = -v_x[t1][x + 7][y + 12][z + 12]; float r49 = 1.402F*(3.59005404e-5F*(r50 + r51 + r52 + v_x[t1][x + 16][y + 12][z + 12] + v_y[t1][x + 12][y + 16][z + 12] + v_z[t1][x +
/* (r49 concludes; tau_xx update, then the tau_xy update begins and continues
   on the next line) */
12][y + 12][z + 16]) + 2.18478119e-6F*(r53 + r54 + r55 + v_x[t1][x + 6][y + 12][z + 12] + v_y[t1][x + 12][y + 6][z + 12] + v_z[t1][x + 12][y + 12][z + 6]) + 2.96728956e-4F*(r56 + r57 + r58 + v_x[t1][x + 8][y + 12][z + 12] + v_y[t1][x + 12][y + 8][z + 12] + v_z[t1][x + 12][y + 12][z + 8]) + 9.6931459e-3F*(r59 + r60 + r61 + v_x[t1][x + 10][y + 12][z + 12] + v_y[t1][x + 12][y + 10][z + 12] + v_z[t1][x + 12][y + 12][z + 10]) + 1.74476626e-3F*(r62 + r63 + r64 + v_x[t1][x + 14][y + 12][z + 12] + v_y[t1][x + 12][y + 14][z + 12] + v_z[t1][x + 12][y + 12][z + 14]) + 1.22133638e-1F*(r65 + r66 + r67 + v_x[t1][x + 12][y + 12][z + 12] + v_y[t1][x + 12][y + 12][z + 12] + v_z[t1][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1]*lam[x + 12][y + 12][z + 12]; tau_xx[t1][x + 12][y + 12][z + 12] = r49 + 2.804F*(3.59005404e-5F*(r50 + v_x[t1][x + 16][y + 12][z + 12]) + 2.18478119e-6F*(r53 + v_x[t1][x + 6][y + 12][z + 12]) + 2.96728956e-4F*(r56 + v_x[t1][x + 8][y + 12][z + 12]) + 9.6931459e-3F*(r59 + v_x[t1][x + 10][y + 12][z + 12]) + 1.74476626e-3F*(r62 + v_x[t1][x + 14][y + 12][z + 12]) + 1.22133638e-1F*(r65 + v_x[t1][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1]*mu[x + 12][y + 12][z + 12] + damp[x + 1][y + 1][z + 1]*tau_xx[t0][x + 12][y + 12][z + 12]; tau_xy[t1][x + 12][y + 12][z + 12] = 3.50499987602234e-1F*(mu[x + 12][y + 12][z + 12] + mu[x + 12][y + 13][z + 12] + mu[x + 13][y + 12][z + 12] + mu[x + 13][y + 13][z + 12])*(1.22133638e-1F*(r68 + r69 + v_x[t1][x + 12][y + 13][z + 12] + v_y[t1][x + 13][y + 12][z + 12]) + 2.18478119e-6F*(v_x[t1][x + 12][y + 7][z + 12] - v_x[t1][x + 12][y + 18][z + 12] + v_y[t1][x + 7][y + 12][z + 12] - v_y[t1][x + 18][y + 12][z + 12]) + 3.59005404e-5F*(-v_x[t1][x + 12][y + 8][z + 12] + v_x[t1][x + 12][y + 17][z + 12] - v_y[t1][x + 8][y + 12][z + 12] + v_y[t1][x + 17][y + 12][z + 12]) + 2.96728956e-4F*(v_x[t1][x + 12][y + 9][z + 12] - v_x[t1][x + 12][y + 16][z + 12] + v_y[t1][x + 9][y + 12][z + 12] - v_y[t1][x + 16][y + 12][z + 12]) +
/* (tau_xy concludes; tau_xz and tau_yy updates, with tau_yy's final term
   continuing on the next line) */
1.74476626e-3F*(-v_x[t1][x + 12][y + 10][z + 12] + v_x[t1][x + 12][y + 15][z + 12] - v_y[t1][x + 10][y + 12][z + 12] + v_y[t1][x + 15][y + 12][z + 12]) + 9.6931459e-3F*(v_x[t1][x + 12][y + 11][z + 12] - v_x[t1][x + 12][y + 14][z + 12] + v_y[t1][x + 11][y + 12][z + 12] - v_y[t1][x + 14][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*tau_xy[t0][x + 12][y + 12][z + 12]; tau_xz[t1][x + 12][y + 12][z + 12] = 3.50499987602234e-1F*(mu[x + 12][y + 12][z + 12] + mu[x + 12][y + 12][z + 13] + mu[x + 13][y + 12][z + 12] + mu[x + 13][y + 12][z + 13])*(1.22133638e-1F*(r68 + r70 + v_x[t1][x + 12][y + 12][z + 13] + v_z[t1][x + 13][y + 12][z + 12]) + 2.18478119e-6F*(v_x[t1][x + 12][y + 12][z + 7] - v_x[t1][x + 12][y + 12][z + 18] + v_z[t1][x + 7][y + 12][z + 12] - v_z[t1][x + 18][y + 12][z + 12]) + 3.59005404e-5F*(-v_x[t1][x + 12][y + 12][z + 8] + v_x[t1][x + 12][y + 12][z + 17] - v_z[t1][x + 8][y + 12][z + 12] + v_z[t1][x + 17][y + 12][z + 12]) + 2.96728956e-4F*(v_x[t1][x + 12][y + 12][z + 9] - v_x[t1][x + 12][y + 12][z + 16] + v_z[t1][x + 9][y + 12][z + 12] - v_z[t1][x + 16][y + 12][z + 12]) + 1.74476626e-3F*(-v_x[t1][x + 12][y + 12][z + 10] + v_x[t1][x + 12][y + 12][z + 15] - v_z[t1][x + 10][y + 12][z + 12] + v_z[t1][x + 15][y + 12][z + 12]) + 9.6931459e-3F*(v_x[t1][x + 12][y + 12][z + 11] - v_x[t1][x + 12][y + 12][z + 14] + v_z[t1][x + 11][y + 12][z + 12] - v_z[t1][x + 14][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*tau_xz[t0][x + 12][y + 12][z + 12]; tau_yy[t1][x + 12][y + 12][z + 12] = r49 + 2.804F*(3.59005404e-5F*(r51 + v_y[t1][x + 12][y + 16][z + 12]) + 2.18478119e-6F*(r54 + v_y[t1][x + 12][y + 6][z + 12]) + 2.96728956e-4F*(r57 + v_y[t1][x + 12][y + 8][z + 12]) + 9.6931459e-3F*(r60 + v_y[t1][x + 12][y + 10][z + 12]) + 1.74476626e-3F*(r63 + v_y[t1][x + 12][y + 14][z + 12]) + 1.22133638e-1F*(r66 + v_y[t1][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1]*mu[x + 12][y + 12][z + 12] + damp[x + 1][y + 1][z + 1]*tau_yy[t0][x
/* (tau_yy concludes; tau_yz and tau_zz updates, then the loop nest and bf1 close) */
+ 12][y + 12][z + 12]; tau_yz[t1][x + 12][y + 12][z + 12] = 3.50499987602234e-1F*(mu[x + 12][y + 12][z + 12] + mu[x + 12][y + 12][z + 13] + mu[x + 12][y + 13][z + 12] + mu[x + 12][y + 13][z + 13])*(1.22133638e-1F*(r69 + r70 + v_y[t1][x + 12][y + 12][z + 13] + v_z[t1][x + 12][y + 13][z + 12]) + 2.18478119e-6F*(v_y[t1][x + 12][y + 12][z + 7] - v_y[t1][x + 12][y + 12][z + 18] + v_z[t1][x + 12][y + 7][z + 12] - v_z[t1][x + 12][y + 18][z + 12]) + 3.59005404e-5F*(-v_y[t1][x + 12][y + 12][z + 8] + v_y[t1][x + 12][y + 12][z + 17] - v_z[t1][x + 12][y + 8][z + 12] + v_z[t1][x + 12][y + 17][z + 12]) + 2.96728956e-4F*(v_y[t1][x + 12][y + 12][z + 9] - v_y[t1][x + 12][y + 12][z + 16] + v_z[t1][x + 12][y + 9][z + 12] - v_z[t1][x + 12][y + 16][z + 12]) + 1.74476626e-3F*(-v_y[t1][x + 12][y + 12][z + 10] + v_y[t1][x + 12][y + 12][z + 15] - v_z[t1][x + 12][y + 10][z + 12] + v_z[t1][x + 12][y + 15][z + 12]) + 9.6931459e-3F*(v_y[t1][x + 12][y + 12][z + 11] - v_y[t1][x + 12][y + 12][z + 14] + v_z[t1][x + 12][y + 11][z + 12] - v_z[t1][x + 12][y + 14][z + 12]))*damp[x + 1][y + 1][z + 1] + damp[x + 1][y + 1][z + 1]*tau_yz[t0][x + 12][y + 12][z + 12]; tau_zz[t1][x + 12][y + 12][z + 12] = r49 + 2.804F*(3.59005404e-5F*(r52 + v_z[t1][x + 12][y + 12][z + 16]) + 2.18478119e-6F*(r55 + v_z[t1][x + 12][y + 12][z + 6]) + 2.96728956e-4F*(r58 + v_z[t1][x + 12][y + 12][z + 8]) + 9.6931459e-3F*(r61 + v_z[t1][x + 12][y + 12][z + 10]) + 1.74476626e-3F*(r64 + v_z[t1][x + 12][y + 12][z + 14]) + 1.22133638e-1F*(r67 + v_z[t1][x + 12][y + 12][z + 12]))*damp[x + 1][y + 1][z + 1]*mu[x + 12][y + 12][z + 12] + damp[x + 1][y + 1][z + 1]*tau_zz[t0][x + 12][y + 12][z + 12]; } } } } } } }
DRB059-lastprivate-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Using lastprivate() to resolve an output dependence. Semantics of lastprivate (x): causes the corresponding original list item to be updated after the end of the region. The compiler/runtime copies the local value back to the shared one within the last iteration. */ #include <stdio.h> #include <omp.h> void foo() { int i; int x; #pragma omp parallel for private (i) lastprivate (x) for (i = 0; i <= 99; i += 1) { x = i; } printf("x=%d",x); } int main() { foo(); return 0; }
GB_binop__bclr_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__bclr_int8)
// A.*B function (eWiseMult):     GB (_AemultB_08__bclr_int8)
// A.*B function (eWiseMult):     GB (_AemultB_02__bclr_int8)
// A.*B function (eWiseMult):     GB (_AemultB_04__bclr_int8)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__bclr_int8)
// A*D function (colscale):       GB ((none))
// D*A function (rowscale):       GB ((none))
// C+=B function (dense accum):   GB (_Cdense_accumB__bclr_int8)
// C+=b function (dense accum):   GB (_Cdense_accumb__bclr_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int8)
// C=scalar+B    GB (_bind1st__bclr_int8)
// C=scalar+B'   GB (_bind1st_tran__bclr_int8)
// C=A+scalar    GB (_bind2nd__bclr_int8)
// C=A'+scalar   GB (_bind2nd_tran__bclr_int8)

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int8_t, 8)
// (bclr: presumably the GxB bit-clear operator for int8 — the GB_BITCLR
//  macro itself is defined outside this file; confirm in GB.h)

// The macros below instantiate the generic templates (#include'd in the
// functions that follow) for this specific type/operator combination.

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITCLR (x, y, int8_t, 8) ;

// true if the binop must be flipped (BCLR is not commutative)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_INT8 || GxB_NO_BCLR_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (disabled stub: BCLR is not one of the accum-capable ops listed above)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bclr_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // returns GrB_NO_VALUE when this operator is compile-time disabled, so
    // the caller falls back to the generic (non-hard-coded) kernel
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bclr_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bclr_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // generated code: this second return is unreachable (the scoped block
    // above always returns first)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// (disabled stub: colscale is not generated for the BCLR operator)
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// (disabled stub: rowscale is not generated for the BCLR operator)
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bclr_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bclr_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bclr_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bclr_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bclr_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bclr_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; GBB tests entry presence
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITCLR (x, bij, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bclr_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; GBB tests entry presence
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITCLR (aij, y, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_BITCLR (x, aij, int8_t, 8) ;   \
}

GrB_Info GB (_bind1st_tran__bclr_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE (same type here, since A, B, and C are all int8_t)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_BITCLR (aij, y, int8_t, 8) ;   \
}

GrB_Info GB (_bind2nd_tran__bclr_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_sgemm_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack1to4_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt) { #if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__ if (ncnn::cpu_support_x86_avx512_vnni()) { extern void im2col_sgemm_pack1to4_int8_sse_avx512vnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); im2col_sgemm_pack1to4_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt); return; } #endif #if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__ if (ncnn::cpu_support_x86_avx_vnni()) { extern void im2col_sgemm_pack1to4_int8_sse_avxvnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); im2col_sgemm_pack1to4_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt); return; } #endif // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; // permute Mat tmp; if (inch >= 4) { #if __AVX2__ if (size >= 4) tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator); else tmp.create(maxk, inch 
/ 4 + inch % 4, size, 4u, 4, opt.workspace_allocator); #else if (size >= 2) tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator); else tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator); #endif } else { #if __AVX2__ if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); #else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); #endif } { #if __AVX2__ int remain_size_start = 0; int nn_size = size >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; signed char* tmpptr = tmp.channel(i / 4); int q = 0; for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr[8] = img0[2]; tmpptr[9] = img1[2]; tmpptr[10] = img2[2]; tmpptr[11] = img3[2]; tmpptr[12] = img0[3]; tmpptr[13] = img1[3]; tmpptr[14] = img2[3]; tmpptr[15] = img3[3]; tmpptr += 16; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = 
img0[3]; tmpptr += 4; img0 += size; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __AVX2__ signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #else signed char* tmpptr = tmp.channel(i / 2); #endif int q = 0; for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += size; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __AVX2__ signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #else signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif int q = 0; for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] 
= img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* outptr0 = top_blob.channel(p); int i = 0; #if __AVX2__ for (; i + 3 < size; i += 4) { const signed char* tmpptr = tmp.channel(i / 4); const signed char* kptr0 = kernel.channel(p / 4); int nn4 = (inch / 4) * maxk; int nn1 = (inch % 4) * maxk; __m256i _sum00_12 = _mm256_setzero_si256(); __m256i _sum20_32 = _mm256_setzero_si256(); if (nn4 > 0) { #if __AVXVNNI__ || __AVX512VNNI__ __m256i _sum10_02 = _mm256_setzero_si256(); __m256i _sum30_22 = _mm256_setzero_si256(); #else __m256i _sum10_02 = _mm256_setzero_si256(); __m256i _sum01_13 = _mm256_setzero_si256(); __m256i _sum11_03 = _mm256_setzero_si256(); __m256i _sum30_22 = _mm256_setzero_si256(); __m256i _sum21_33 = _mm256_setzero_si256(); __m256i _sum31_23 = _mm256_setzero_si256(); #endif int j = 0; for (; j < nn4; j++) { __m128i _val0123 = _mm_loadu_si128((const __m128i*)tmpptr); __m256i _val0123_16 = _mm256_cvtepi8_epi16(_val0123); __m256i _val01_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(1, 1, 0, 0)); __m256i _val23_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(3, 3, 2, 2)); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01); __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78); __m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78); #if __AVXVNNI__ || __AVX512VNNI__ _sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16); _sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16); _sum20_32 = _mm256_dpwssd_epi32(_sum20_32, _val23_16, _w01_16); _sum30_22 = _mm256_dpwssd_epi32(_sum30_22, _val32_16, _w01_16); #else __m256i 
_sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16); __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16); __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16); __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16); __m256i _sl20_31 = _mm256_mullo_epi16(_val23_16, _w01_16); __m256i _sh20_31 = _mm256_mulhi_epi16(_val23_16, _w01_16); __m256i _sl30_21 = _mm256_mullo_epi16(_val32_16, _w01_16); __m256i _sh30_21 = _mm256_mulhi_epi16(_val32_16, _w01_16); _sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11)); _sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01)); _sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11)); _sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01)); _sum20_32 = _mm256_add_epi32(_sum20_32, _mm256_unpacklo_epi16(_sl20_31, _sh20_31)); _sum30_22 = _mm256_add_epi32(_sum30_22, _mm256_unpacklo_epi16(_sl30_21, _sh30_21)); _sum21_33 = _mm256_add_epi32(_sum21_33, _mm256_unpackhi_epi16(_sl20_31, _sh20_31)); _sum31_23 = _mm256_add_epi32(_sum31_23, _mm256_unpackhi_epi16(_sl30_21, _sh30_21)); #endif tmpptr += 16; kptr0 += 16; } #if __AVXVNNI__ || __AVX512VNNI__ _sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02); _sum20_32 = _mm256_hadd_epi32(_sum20_32, _sum30_22); __m256i _perm_mask = _mm256_set_epi32(5, 1, 6, 2, 7, 3, 4, 0); _sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask); _sum20_32 = _mm256_permutevar8x32_epi32(_sum20_32, _perm_mask); #else // transpose 4x8 { __m256i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02); _tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03); _tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02); _tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03); _sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1); _sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1); _sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3); _sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3); } { __m256i _tmp0, _tmp1, 
_tmp2, _tmp3; _tmp0 = _mm256_unpacklo_epi32(_sum20_32, _sum30_22); _tmp1 = _mm256_unpacklo_epi32(_sum21_33, _sum31_23); _tmp2 = _mm256_unpackhi_epi32(_sum20_32, _sum30_22); _tmp3 = _mm256_unpackhi_epi32(_sum21_33, _sum31_23); _sum20_32 = _mm256_unpacklo_epi64(_tmp0, _tmp1); _sum30_22 = _mm256_unpackhi_epi64(_tmp0, _tmp1); _sum21_33 = _mm256_unpacklo_epi64(_tmp2, _tmp3); _sum31_23 = _mm256_unpackhi_epi64(_tmp2, _tmp3); } _sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02); _sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03); _sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13); _sum20_32 = _mm256_add_epi32(_sum20_32, _sum30_22); _sum21_33 = _mm256_add_epi32(_sum21_33, _sum31_23); _sum20_32 = _mm256_add_epi32(_sum20_32, _sum21_33); __m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0); _sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask); _sum20_32 = _mm256_permutevar8x32_epi32(_sum20_32, _perm_mask); #endif } __m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0); __m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1); __m128i _sum20 = _mm256_extracti128_si256(_sum20_32, 0); __m128i _sum30 = _mm256_extracti128_si256(_sum20_32, 1); int j = 0; for (; j < nn1; j++) { __m128i _val01 = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]); __m128i _val23 = _mm_set_epi16(tmpptr[3], tmpptr[3], tmpptr[3], tmpptr[3], tmpptr[2], tmpptr[2], tmpptr[2], tmpptr[2]); __m128i _w0123 = _mm_set_epi16(kptr0[3], kptr0[2], kptr0[1], kptr0[0], kptr0[3], kptr0[2], kptr0[1], kptr0[0]); __m128i _sl00 = _mm_mullo_epi16(_val01, _w0123); __m128i _sh00 = _mm_mulhi_epi16(_val01, _w0123); __m128i _sl10 = _mm_mullo_epi16(_val23, _w0123); __m128i _sh10 = _mm_mulhi_epi16(_val23, _w0123); _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00)); _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00)); _sum20 = _mm_add_epi32(_sum20, _mm_unpacklo_epi16(_sl10, _sh10)); _sum30 = _mm_add_epi32(_sum30, 
_mm_unpackhi_epi16(_sl10, _sh10)); tmpptr += 4; kptr0 += 4; } _mm_storeu_si128((__m128i*)outptr0, _sum00); _mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10); _mm_storeu_si128((__m128i*)(outptr0 + 8), _sum20); _mm_storeu_si128((__m128i*)(outptr0 + 12), _sum30); outptr0 += 16; } #endif for (; i + 1 < size; i += 2) { #if __AVX2__ const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p); int nn4 = (inch / 4) * maxk; int nn1 = (inch % 4) * maxk; #if __AVX2__ __m256i _sum00_12 = _mm256_setzero_si256(); #else __m128i _sum00 = _mm_setzero_si128(); __m128i _sum10 = _mm_setzero_si128(); #endif if (nn4 > 0) { #if __AVX2__ #if __AVXVNNI__ || __AVX512VNNI__ __m256i _sum10_02 = _mm256_setzero_si256(); #else __m256i _sum10_02 = _mm256_setzero_si256(); __m256i _sum01_13 = _mm256_setzero_si256(); __m256i _sum11_03 = _mm256_setzero_si256(); #endif #else __m128i _sum01 = _mm_setzero_si128(); __m128i _sum02 = _mm_setzero_si128(); __m128i _sum03 = _mm_setzero_si128(); __m128i _sum11 = _mm_setzero_si128(); __m128i _sum12 = _mm_setzero_si128(); __m128i _sum13 = _mm_setzero_si128(); #endif int j = 0; for (; j < nn4; j++) { #if __AVX2__ __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr); __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01); _val01_16 = _mm256_permute4x64_epi64(_val01_16, _MM_SHUFFLE(1, 1, 0, 0)); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01); __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78); #if __AVXVNNI__ || __AVX512VNNI__ _sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16); _sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16); #else __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16); __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16); __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16); __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16); _sum00_12 
= _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11)); _sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01)); _sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11)); _sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01)); #endif #else __m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr); #if __SSE4_1__ _val01 = _mm_cvtepi8_epi16(_val01); #else __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01); _val01 = _mm_unpacklo_epi8(_val01, _extval01); #endif __m128i _val0 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 0, 1, 0)); __m128i _val1 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(3, 2, 3, 2)); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01); __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01); __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01); __m128i _sl00 = _mm_mullo_epi16(_val0, _w0); __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl01 = _mm_mullo_epi16(_val0, _w1); __m128i _sh01 = _mm_mulhi_epi16(_val0, _w1); __m128i _sl10 = _mm_mullo_epi16(_val1, _w0); __m128i _sh10 = _mm_mulhi_epi16(_val1, _w0); __m128i _sl11 = _mm_mullo_epi16(_val1, _w1); __m128i _sh11 = _mm_mulhi_epi16(_val1, _w1); _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00)); _sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl00, _sh00)); _sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl01, _sh01)); _sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl01, _sh01)); _sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10)); _sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl10, _sh10)); _sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl11, _sh11)); _sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl11, _sh11)); #endif tmpptr += 8; kptr0 += 16; } #if __AVX2__ #if __AVXVNNI__ || __AVX512VNNI__ _sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02); __m256i _perm_mask = _mm256_set_epi32(5, 1, 6, 2, 7, 3, 
4, 0); _sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask); #else // transpose 4x8 { __m256i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02); _tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03); _tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02); _tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03); _sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1); _sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1); _sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3); _sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3); } _sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02); _sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03); _sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13); __m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0); _sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask); #endif #else // transpose 4x4 { __m128i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm_unpacklo_epi32(_sum00, _sum01); _tmp1 = _mm_unpacklo_epi32(_sum02, _sum03); _tmp2 = _mm_unpackhi_epi32(_sum00, _sum01); _tmp3 = _mm_unpackhi_epi32(_sum02, _sum03); _sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1); _sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1); _sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3); _sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3); } { __m128i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm_unpacklo_epi32(_sum10, _sum11); _tmp1 = _mm_unpacklo_epi32(_sum12, _sum13); _tmp2 = _mm_unpackhi_epi32(_sum10, _sum11); _tmp3 = _mm_unpackhi_epi32(_sum12, _sum13); _sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1); _sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1); _sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3); _sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3); } _sum00 = _mm_add_epi32(_sum00, _sum01); _sum02 = _mm_add_epi32(_sum02, _sum03); _sum10 = _mm_add_epi32(_sum10, _sum11); _sum12 = _mm_add_epi32(_sum12, _sum13); _sum00 = _mm_add_epi32(_sum00, _sum02); _sum10 = _mm_add_epi32(_sum10, _sum12); #endif } #if __AVX2__ __m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0); __m128i _sum10 = 
_mm256_extracti128_si256(_sum00_12, 1); #endif int j = 0; for (; j < nn1; j++) { __m128i _val = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]); __m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0); #if __SSE4_1__ _w0123 = _mm_cvtepi8_epi16(_w0123); #else __m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123); _w0123 = _mm_unpacklo_epi8(_w0123, _extw0123); #endif _w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0)); __m128i _sl00 = _mm_mullo_epi16(_val, _w0123); __m128i _sh00 = _mm_mulhi_epi16(_val, _w0123); _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00)); _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00)); tmpptr += 2; kptr0 += 4; } _mm_storeu_si128((__m128i*)outptr0, _sum00); _mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10); outptr0 += 8; } for (; i < size; i++) { #if __AVX2__ const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p); int nn4 = (inch / 4) * maxk; int nn1 = (inch % 4) * maxk; __m128i _sum0 = _mm_setzero_si128(); if (nn4 > 0) { __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); int j = 0; for (; j < nn4; j++) { __m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr); #if __SSE4_1__ __m128i _val0 = _mm_cvtepi8_epi16(_val01); #else __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01); __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01); #endif _val0 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 0, 1, 0)); __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0); __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01); __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01); __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01); __m128i _sl00 = _mm_mullo_epi16(_val0, _w0); __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl01 = _mm_mullo_epi16(_val0, _w1); __m128i 
_sh01 = _mm_mulhi_epi16(_val0, _w1); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl01, _sh01)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl01, _sh01)); tmpptr += 4; kptr0 += 16; } // transpose 4x4 { __m128i _tmp0, _tmp1, _tmp2, _tmp3; _tmp0 = _mm_unpacklo_epi32(_sum0, _sum1); _tmp1 = _mm_unpacklo_epi32(_sum2, _sum3); _tmp2 = _mm_unpackhi_epi32(_sum0, _sum1); _tmp3 = _mm_unpackhi_epi32(_sum2, _sum3); _sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1); _sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1); _sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3); _sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3); } _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); _sum0 = _mm_add_epi32(_sum0, _sum2); } int j = 0; for (; j < nn1; j++) { __m128i _val = _mm_set1_epi16(tmpptr[0]); __m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0); #if __SSE4_1__ _w0123 = _mm_cvtepi8_epi16(_w0123); #else __m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123); _w0123 = _mm_unpacklo_epi8(_w0123, _extw0123); #endif __m128i _sl00 = _mm_mullo_epi16(_val, _w0123); __m128i _sh00 = _mm_mulhi_epi16(_val, _w0123); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00)); tmpptr += 1; kptr0 += 4; } _mm_storeu_si128((__m128i*)outptr0, _sum0); outptr0 += 4; } } } static void convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 4a-4b-maxk-inch/4a-outch/4b Mat kernel = _kernel.reshape(maxk, inch, outch); if (inch >= 4) kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4, (size_t)1u); else kernel_tm.create(4 * maxk, inch, outch / 4, (size_t)1u); for (int q = 0; q + 3 < outch; q += 4) { signed char* g00 = kernel_tm.channel(q / 4); int p = 0; for (; p + 3 < inch; p += 4) { for (int k = 
0; k < maxk; k++) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } } } for (; p < inch; p++) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < 4; i++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p); g00[0] = k00[k]; g00++; } } } } } static void convolution_im2col_sgemm_pack1to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator); { const int gap = w * stride_h - outw * stride_w; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); signed char* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; ptr[2] = sptr[stride_w * 2]; ptr[3] = sptr[stride_w * 3]; sptr += stride_w * 4; ptr += 4; } for (; j + 1 < outw; j += 2) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; sptr += stride_w * 2; ptr += 2; } for (; j < outw; j++) { ptr[0] = sptr[0]; sptr += stride_w; ptr += 1; } sptr += gap; } } } } } im2col_sgemm_pack1to4_int8_sse(bottom_im2col, top_blob, kernel, opt); }
sigmoid_arm_func.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef SIGMOID_OP
#pragma once

#include "operators/op_param.h"
#if __ARM_NEON
#include <arm_neon.h>
#include "operators/math/math_func_neon.h"
#endif

namespace paddle_mobile {
namespace operators {

using framework::DDim;

// Computes Y = 1 / (1 + exp(-X)) element-wise.
//
// On ARM, the tensor is split into outer x inner runs (axis chosen from the
// rank), the outer runs are processed in parallel, and each inner run is
// handled 4 floats at a time with NEON: exp_ps(-x), add 1, then a reciprocal
// estimate refined by one Newton-Raphson step.  The inner-run tail is scalar.
//
// Fix: previously the whole body was guarded by #if __ARM_NEON, so on a
// non-NEON build this function silently left Y uninitialized; a scalar
// fallback now covers that case.
void sigmoid(const Tensor *X, Tensor *Y) {
  const float *input = X->data<float>();
  float *output = Y->mutable_data<float>();
#if __ARM_NEON
  const DDim &dDim = X->dims();
  // For low-rank tensors treat dim 0 as the parallel axis, otherwise dim 1.
  int axis_index = 1;
  if (dDim.size() < 4) {
    axis_index = 0;
  }
  DDim outer_ddim =
      paddle_mobile::framework::slice_ddim(dDim, 0, axis_index + 1);
  DDim inner_ddim =
      paddle_mobile::framework::slice_ddim(dDim, axis_index + 1, dDim.size());
  int out_size = paddle_mobile::framework::product(outer_ddim);
  int inner_size = paddle_mobile::framework::product(inner_ddim);

  DLOG << "outsize=" << out_size;
  DLOG << "innersize=" << inner_size;
#pragma omp parallel for
  for (int i = 0; i < out_size; ++i) {
    const float *input_outer_ptr = input + i * inner_size;
    float *output_outer_ptr = output + i * inner_size;
    int nn = inner_size >> 2;               // number of full 4-lane vectors
    int remain = inner_size - (nn << 2);    // scalar tail length
    float32x4_t _one = vdupq_n_f32(1.f);
    for (; nn > 0; nn--) {
      float32x4_t data = vld1q_f32(input_outer_ptr);
      data = vnegq_f32(data);
      data = exp_ps(data);
      data = vaddq_f32(data, _one);
      // Reciprocal estimate plus one Newton-Raphson refinement step.
      float32x4_t out_data = vrecpeq_f32(data);
      out_data = vmulq_f32(vrecpsq_f32(data, out_data), out_data);
      vst1q_f32(output_outer_ptr, out_data);
      input_outer_ptr += 4;
      output_outer_ptr += 4;
    }
    for (; remain > 0; remain--) {
      *output_outer_ptr = 1.f / (1.f + exp(-*input_outer_ptr));
      output_outer_ptr++;
      input_outer_ptr++;
    }
  }
#else
  // Scalar fallback so non-NEON builds still produce a valid result.
  int total = paddle_mobile::framework::product(X->dims());
  for (int i = 0; i < total; ++i) {
    output[i] = 1.f / (1.f + exp(-input[i]));
  }
#endif
}

// Kernel entry point: resizes the output to match the input, then applies
// the element-wise sigmoid above.
template <typename P>
void SigmoidCompute(const SigmoidParam &param) {
  const Tensor *in_x = param.InputX();
  Tensor *out = param.Out();
  auto x_dims = in_x->dims();
  out->Resize(x_dims);
  sigmoid(in_x, out);
}
}  // namespace operators
}  // namespace paddle_mobile

#endif
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
/* Integer-valued ceiling/floor division helpers used by the CLooG-generated
 * tile-bound expressions below. */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: Y is modified in place while normalizing the carry. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates the two time planes of A and the 13
 * coefficient arrays, runs the tiled stencil TESTS times and reports the
 * best wall-clock time.
 * Usage: prog Nx Ny Nz Nt  (each spatial size is padded by 8 for the halo).
 * NOTE(review): Nx/Ny/Nz (argc <= 3) and Nt (argc <= 4) are used
 * uninitialized when arguments are missing — confirm callers always pass
 * all four sizes. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  /* NOTE(review): loops start at index 1, so plane/row/column 0 is left
   * uninitialized — presumably halo that the stencil never reads at offset
   * 0; verify against the loop bounds below. */
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* (A glibc <stdc-predef.h> comment blob — preprocessor residue from the
     * original code generator — stood here; it documents nothing about this
     * program and has been condensed to this note.) */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code: time-tiled (diamond-tiled) sweep. t1 is the tile
     * wavefront, t2..t4 index tiles in z/y/x, t5 is time within a tile and
     * t6..t8 are the z/y/x point loops. A[] ping-pongs on t5 % 2. */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=2*Nt-2;t1++) {
        lbp=ceild(t1+2,2);
        ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(ceild(t1,4),ceild(4*t2-Nz+5,8));t3<=min(min(floord(4*Nt+Ny-9,8),floord(2*t1+Ny-3,8)),floord(4*t2+Ny-9,8));t3++) {
            for (t4=max(max(ceild(t1-60,64),ceild(4*t2-Nz-115,128)),ceild(8*t3-Ny-115,128));t4<=min(min(min(floord(4*Nt+Nx-9,128),floord(2*t1+Nx-3,128)),floord(4*t2+Nx-9,128)),floord(8*t3+Nx-5,128));t4++) {
              for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(128*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
                for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                    lbv=max(128*t4,4*t5+4);
                    ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 25-point axis-symmetric stencil: center plus radius
                       * 1..4 neighbor pairs on each axis, each pair scaled
                       * by its own coefficient array. */
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] =
                        (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)])
                        + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)])))
                        + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)])))
                        + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1])))
                        + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)])))
                        + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)])))
                        + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2])))
                        + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)])))
                        + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)])))
                        + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3])))
                        + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)])))
                        + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)])))
                        + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  /* PRINT_RESULTS comes from print_utils.h; presumably it reports min_tdiff
   * and num_threads — confirm against that header. */
  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  /* NOTE(review): the top-level A, coef and tile_size pointers themselves
   * are never freed — harmless at process exit, but a leak if this main is
   * ever reused as a library routine. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
upwind5_impl_c_.c
/* File-scope state set once by upwind5_init() and reused by every
 * subsequent upwind5_interpolate() call. */
static double denom;  /* 1 / (60 * dx): common denominator of both stencils */
static int size;      /* number of grid points in each array */

/* Record the grid size and precompute the stencil denominator.
 * asize: number of points in each input/output array
 * adx:   grid spacing dx (must be non-zero) */
void upwind5_init(int asize, double adx)
{
    size = asize;
    denom = 1.0 / (60.0 * adx);
}

/* Fifth-order upwind-biased first-derivative stencils.
 * a..g hold u sampled at offsets -3..+3 around each point i; u_x_minus and
 * u_x_plus receive the left- and right-biased derivative approximations.
 *
 * Portability fix: the C99 `restrict` keyword is not valid C++, so this
 * translation unit could not be built by a C++ compiler; `__restrict` is
 * accepted by GCC, Clang and MSVC in both C and C++ modes and carries the
 * same no-aliasing promise that enables vectorization. */
void upwind5_interpolate(
    double *a,
    double *b,
    double *c,
    double *d,
    double *e,
    double *f,
    double *g,
    double * __restrict u_x_plus,
    double * __restrict u_x_minus)
{
    /* Loop-invariant copies; the constant bound aids auto-vectorization. */
    const double scale = denom;
    const int ub = size;
#pragma omp simd
    for (int i = 0; i < ub; ++i) {
        /* Declaring the stencil samples inside the loop makes them
         * per-iteration by construction, so no private() clause is needed. */
        const double a1 = a[i];
        const double b1 = b[i];
        const double c1 = c[i];
        const double d1 = d[i];
        const double e1 = e[i];
        const double f1 = f[i];
        const double g1 = g[i];
        u_x_minus[i] = (-2*a1 + 15*b1 - 60*c1 + 20*d1 + 30*e1 - 3*f1) * scale;
        u_x_plus[i]  = (3*b1 - 30*c1 - 20*d1 + 60*e1 - 15*f1 + 2*g1) * scale;
    }
}
flowinfo_ipv4_dst.c
/*
 * Copyright 2014-2017 Nippon Telegraph and Telephone Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 *      @file   flowinfo_ipv4_dst.c
 *      @brief  Optimized flow database for dataplane, for ipv4_dst
 *
 *      Two flowinfo layers are implemented here:
 *        - the "mask" layer keys flows by their IPv4-dst netmask; one child
 *          flowinfo exists per distinct mask (mask stored in userdata), and
 *          flows without an ipv4_dst match fall through to self->misc.
 *        - the per-mask layer keys flows by masked destination address in a
 *          hashmap; each bucket delegates to a new_flowinfo_ipv4() child.
 */

#include <stdlib.h>

#include "openflow.h"
#include "lagopus_apis.h"
#include "lagopus/flowdb.h"
#include "pktbuf.h"
#include "packet.h"
#include "lagopus/flowinfo.h"

/* OXM header field: low bit is the has-mask flag, remaining bits the type. */
#define OXM_FIELD_TYPE(field) ((field) >> 1)
#define IPV4_DST_BITLEN (32)

static lagopus_result_t
add_flow_ipv4_dst_mask(struct flowinfo *, struct flow *);
static lagopus_result_t
del_flow_ipv4_dst_mask(struct flowinfo *, struct flow *);
static struct flow *
match_flow_ipv4_dst_mask(struct flowinfo *, struct lagopus_packet *,
                         int32_t *);
static struct flow *
find_flow_ipv4_dst_mask(struct flowinfo *, struct flow *);
static void
destroy_flowinfo_ipv4_dst_mask(struct flowinfo *);

static lagopus_result_t
add_flow_ipv4_dst(struct flowinfo *, struct flow *);
static lagopus_result_t
del_flow_ipv4_dst(struct flowinfo *, struct flow *);
static struct flow *
match_flow_ipv4_dst(struct flowinfo *, struct lagopus_packet *, int32_t *);
static struct flow *
find_flow_ipv4_dst(struct flowinfo *, struct flow *);
static void
destroy_flowinfo_ipv4_dst(struct flowinfo *);

/*
 * Extract the IPv4 destination (and mask) from a flow's match list.
 * A masked OXM entry carries value[0..3] = address, value[4..7] = mask;
 * an unmasked entry implies an all-ones mask.  Returns
 * LAGOPUS_RESULT_NOT_FOUND when the list has no ipv4_dst match at all.
 */
static lagopus_result_t
get_match_ipv4_dst(const struct match_list *match_list,
                   uint32_t *ipv4_dst, uint32_t *mask) {
  const struct match *match;

  TAILQ_FOREACH(match, match_list, entry) {
    /* (type << 1) + 1 is the masked form of the ipv4_dst OXM header. */
    if (match->oxm_field == (OFPXMT_OFB_IPV4_DST << 1) + 1) {
      OS_MEMCPY(ipv4_dst, match->oxm_value, sizeof(*ipv4_dst));
      OS_MEMCPY(mask, &match->oxm_value[4], sizeof(*mask));
      break;
    }
    if (OXM_FIELD_TYPE(match->oxm_field) == OFPXMT_OFB_IPV4_DST) {
      OS_MEMCPY(ipv4_dst, match->oxm_value, sizeof(*ipv4_dst));
      *mask = 0xffffffff;
      break;
    }
  }
  if (match == NULL) {
    return LAGOPUS_RESULT_NOT_FOUND;
  }
  return LAGOPUS_RESULT_OK;
}

/*
 * Allocate the per-mask dispatch layer.  next[] starts as a 1-byte
 * placeholder that add_flow_ipv4_dst_mask() grows with realloc().
 * NOTE(review): new_flowinfo_ipv4_src_mask() is not NULL-checked here —
 * confirm allocation failure is handled by callers.
 */
struct flowinfo *
new_flowinfo_ipv4_dst_mask(void) {
  struct flowinfo *self;

  self = calloc(1, sizeof(struct flowinfo));
  if (self != NULL) {
    self->nflow = 0;
    self->nnext = 0;
    self->next = malloc(1);
    self->misc = new_flowinfo_ipv4_src_mask();
    self->add_func = add_flow_ipv4_dst_mask;
    self->del_func = del_flow_ipv4_dst_mask;
    self->match_func = match_flow_ipv4_dst_mask;
    self->find_func = find_flow_ipv4_dst_mask;
    self->destroy_func = destroy_flowinfo_ipv4_dst_mask;
  }
  return self;
}

/* Destroy the mask layer: each per-mask child, the child array and self.
 * NOTE(review): self->misc is not destroyed here — confirm whether that is
 * intentional or a leak. */
static void
destroy_flowinfo_ipv4_dst_mask(struct flowinfo *self) {
  struct flowinfo *flowinfo;
  unsigned int i;

  for (i = 0; i < self->nnext; i++) {
    flowinfo = self->next[i];
    flowinfo->destroy_func(flowinfo);
  }
  free(self->next);
  free(self);
}

/* Hashmap value destructor: dispatch to the stored flowinfo's destroy hook. */
static void
freeup_flowinfo(void *val) {
  struct flowinfo *flowinfo;

  flowinfo = val;
  flowinfo->destroy_func(flowinfo);
}

/* Allocate a per-mask layer keyed by masked destination address. */
struct flowinfo *
new_flowinfo_ipv4_dst(void) {
  struct flowinfo *self;

  self = calloc(1, sizeof(struct flowinfo));
  if (self != NULL) {
    lagopus_hashmap_create(&self->hashmap,
                           LAGOPUS_HASHMAP_TYPE_ONE_WORD,
                           freeup_flowinfo);
    /* misc is not used */
    self->add_func = add_flow_ipv4_dst;
    self->del_func = del_flow_ipv4_dst;
    self->match_func = match_flow_ipv4_dst;
    self->find_func = find_flow_ipv4_dst;
    self->destroy_func = destroy_flowinfo_ipv4_dst;
  }
  return self;
}

/* Destroy the per-mask layer; the hashmap destructor frees each bucket. */
static void
destroy_flowinfo_ipv4_dst(struct flowinfo *self) {
  lagopus_hashmap_destroy(&self->hashmap, true);
  free(self);
}

/*
 * Add a flow to the mask layer: locate (or lazily create) the child
 * flowinfo whose userdata equals the flow's mask, or fall through to misc
 * when the flow has no ipv4_dst match.
 * NOTE(review): new_flowinfo_ipv4_dst() and realloc() results are not
 * NULL-checked — confirm against project allocation policy.
 */
static lagopus_result_t
add_flow_ipv4_dst_mask(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint32_t ipv4_dst, mask;
  lagopus_result_t rv;
  unsigned int i;

  rv = get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = LAGOPUS_RESULT_NOT_FOUND;
    for (i = 0; i < self->nnext; i++) {
      if (self->next[i]->userdata == mask) {
        flowinfo = self->next[i];
        rv = LAGOPUS_RESULT_OK;
        break;
      }
    }
    if (rv == LAGOPUS_RESULT_NOT_FOUND) {
      /* new node. */
      flowinfo = new_flowinfo_ipv4_dst();
      flowinfo->userdata = mask;
      self->next = realloc(self->next,
                           (unsigned long)(self->nnext + 1)
                           * sizeof(struct flowinfo *));
      self->next[self->nnext] = flowinfo;
      self->nnext++;
    }
    rv = flowinfo->add_func(flowinfo, flow);
  } else {
    rv = self->misc->add_func(self->misc, flow);
  }
  if (rv == LAGOPUS_RESULT_OK) {
    self->nflow++;
  }
  return rv;
}

/*
 * Delete a flow from the mask layer; a per-mask child that becomes empty is
 * destroyed and compacted out of next[].
 * NOTE(review): the memmove size uses sizeof(struct flowinfo **) — same
 * byte count as the intended sizeof(struct flowinfo *), but the latter was
 * presumably meant.
 */
static lagopus_result_t
del_flow_ipv4_dst_mask(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint32_t ipv4_dst, mask;
  lagopus_result_t rv;
  unsigned int i;

  rv = get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = LAGOPUS_RESULT_NOT_FOUND;
    for (i = 0; i < self->nnext; i++) {
      if (self->next[i]->userdata == mask) {
        flowinfo = self->next[i];
        rv = LAGOPUS_RESULT_OK;
        break;
      }
    }
    if (rv == LAGOPUS_RESULT_NOT_FOUND) {
      return LAGOPUS_RESULT_NOT_FOUND;
    }
    rv = flowinfo->del_func(flowinfo, flow);
    if (flowinfo->nflow == 0) {
      flowinfo->destroy_func(flowinfo);
      self->nnext--;
      memmove(&self->next[i], &self->next[i + 1],
              (self->nnext - i) * sizeof(struct flowinfo **));
    }
  } else {
    rv = self->misc->del_func(self->misc, flow);
  }
  if (rv == LAGOPUS_RESULT_OK) {
    self->nflow--;
  }
  return rv;
}

/*
 * Match a packet against every per-mask child plus misc, returning the
 * highest-priority hit (a stack sentinel of priority 0 stands in for "no
 * match").  flow[] is a VLA of self->nnext candidate results.
 */
static struct flow *
match_flow_ipv4_dst_mask(struct flowinfo *self, struct lagopus_packet *pkt,
                         int32_t *pri) {
  struct flowinfo *flowinfo;
  struct flow *flow[self->nnext], *matched, *alt_flow;
  struct flow mismatched = {
    .priority = 0,
    .flags = 0,
    .idle_timeout = 0,
    .hard_timeout = 0,
    .match_list = {NULL, NULL},
    .instruction_list = {NULL, NULL},
    .field_bits = 0
  };
  unsigned int i;

  matched = &mismatched;
  //#pragma omp parallel for
  for (i = 0; i < self->nnext; i++) {
    flowinfo = self->next[i];
    flow[i] = flowinfo->match_func(flowinfo, pkt, pri);
  }
  for (i = 0; i < self->nnext; i++) {
    if (flow[i] != NULL && flow[i]->priority > matched->priority) {
      matched = flow[i];
    }
  }
  /* NOTE(review): a misc hit unconditionally overrides the per-mask winner
   * regardless of priority — presumably match_func's pri bookkeeping makes
   * this correct; confirm. */
  alt_flow = self->misc->match_func(self->misc, pkt, pri);
  if (alt_flow != NULL) {
    matched = alt_flow;
  }
  if (matched == &mismatched) {
    matched = NULL;
  }
  return matched;
}

/* Exact-lookup of a flow through the mask layer (no wildcarding). */
static struct flow *
find_flow_ipv4_dst_mask(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint32_t ipv4_dst, mask;
  lagopus_result_t rv;
  unsigned int i;

  rv = get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = LAGOPUS_RESULT_NOT_FOUND;
    for (i = 0; i < self->nnext; i++) {
      if (self->next[i]->userdata == mask) {
        flowinfo = self->next[i];
        rv = LAGOPUS_RESULT_OK;
        break;
      }
    }
    if (rv == LAGOPUS_RESULT_NOT_FOUND) {
      return NULL;
    }
  } else {
    flowinfo = self->misc;
  }
  return flowinfo->find_func(flowinfo, flow);
}

/*
 * Add a flow to the per-mask layer: bucket on the (already masked) address,
 * creating a new_flowinfo_ipv4() bucket on first use.
 */
static lagopus_result_t
add_flow_ipv4_dst(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint32_t ipv4_dst, mask;
  lagopus_result_t rv;

  rv = get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = lagopus_hashmap_find_no_lock(&self->hashmap,
                                      (void *)ipv4_dst,
                                      (void *)&flowinfo);
    if (rv != LAGOPUS_RESULT_OK) {
      void *val;

      flowinfo = new_flowinfo_ipv4();
      val = flowinfo;
      lagopus_hashmap_add_no_lock(&self->hashmap,
                                  (void *)ipv4_dst,
                                  (void *)&val, false);
    }
    rv = flowinfo->add_func(flowinfo, flow);
    if (rv == LAGOPUS_RESULT_OK) {
      self->nflow++;
    }
  }
  return rv;
}

/*
 * Delete a flow from the per-mask layer.
 * NOTE(review): the return value of flowinfo->del_func() is discarded and
 * nflow is decremented based on the *find* result, so a failed inner delete
 * still shrinks the count; an emptied bucket is also never removed from the
 * hashmap.  Confirm whether either is intentional.
 */
static lagopus_result_t
del_flow_ipv4_dst(struct flowinfo *self, struct flow *flow) {
  uint32_t ipv4_dst, mask;
  lagopus_result_t rv;

  rv = get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    struct flowinfo *flowinfo;

    rv = lagopus_hashmap_find_no_lock(&self->hashmap,
                                      (void *)ipv4_dst,
                                      (void *)&flowinfo);
    if (rv == LAGOPUS_RESULT_OK) {
      flowinfo->del_func(flowinfo, flow);
    }
    if (rv == LAGOPUS_RESULT_OK) {
      self->nflow--;
    }
  }
  return rv;
}

/*
 * Match a packet in the per-mask layer: apply this layer's mask (stored in
 * userdata) to the packet's destination address and look up the bucket.
 */
static struct flow *
match_flow_ipv4_dst(struct flowinfo *self, struct lagopus_packet *pkt,
                    int32_t *pri) {
  struct flowinfo *flowinfo;
  uint32_t ipv4_dst;
  struct flow *flow;
  lagopus_result_t rv;

  flow = NULL;
  ipv4_dst = (pkt->ipv4->ip_dst.s_addr & (uint32_t)self->userdata);
  rv = lagopus_hashmap_find_no_lock(&self->hashmap,
                                    (void *)ipv4_dst,
                                    (void *)&flowinfo);
  if (rv == LAGOPUS_RESULT_OK) {
    flow = flowinfo->match_func(flowinfo, pkt, pri);
  }
  return flow;
}

/* Exact-lookup of a flow in the per-mask layer; falls back to misc when the
 * flow carries no ipv4_dst match. */
static struct flow *
find_flow_ipv4_dst(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint32_t ipv4_dst, mask;
  lagopus_result_t rv;

  rv = get_match_ipv4_dst(&flow->match_list, &ipv4_dst, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = lagopus_hashmap_find_no_lock(&self->hashmap,
                                      (void *)ipv4_dst,
                                      (void *)&flowinfo);
    if (rv != LAGOPUS_RESULT_OK) {
      return NULL;
    }
    return flowinfo->find_func(flowinfo, flow);
  } else {
    return self->misc->find_func(self->misc, flow);
  }
}
declare8.c
/* Example of multiple declare simd directives for a function Multiple SIMD versions of the function are generated. The invocation of a specific version of the function is determined by where it is called. */ #pragma omp declare simd linear(pixel) uniform(mask) inbranch #pragma omp declare simd linear(pixel) notinbranch #pragma omp declare simd extern void compute_pixel(char *pixel, char mask);
genetic.c
/* Genetic algorithm to explore xorshift-multiply-xorshift hashes. */
#include <math.h>
#include <time.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define POOL      40
#define THRESHOLD 2.0 // Use exact when estimate is below this
#define DONTCARE  0.3 // Only print tuples with bias below this threshold
#define QUALITY   18  // 2^N iterations of estimate samples
#define RESETMINS 90  // Reset pool after this many minutes of no progress

/* xoshiro256**-style PRNG step; s is the 256-bit generator state,
 * mutated in place.  Deterministic given the same state. */
static uint64_t
rand64(uint64_t s[4])
{
    uint64_t x = s[1] * 5;
    uint64_t r = ((x << 7) | (x >> 57)) * 9;
    uint64_t t = s[1] << 17;
    s[2] ^= s[0];
    s[3] ^= s[1];
    s[1] ^= s[2];
    s[0] ^= s[3];
    s[2] ^= t;
    s[3] = (s[3] << 45) | (s[3] >> 19);
    return r;
}

#define FLAG_SCORED  (1u << 0)  // score holds at least an estimate
#define FLAG_EXACT   (1u << 1)  // score is the exhaustive bias
#define FLAG_PRINTED (1u << 2)  // already reported on stdout

/* One candidate hash: three shift amounts s[] and two odd multiplier
 * constants c[], plus its measured avalanche bias and bookkeeping flags.
 * NOTE: cmp() below relies on `score` being the first member. */
struct gene {
    double score;
    short s[3];
    uint32_t c[2];
    unsigned flags;
};

/* The candidate hash itself: xorshift-multiply-xorshift-multiply-xorshift. */
static uint32_t
hash(const struct gene *g, uint32_t x)
{
    x ^= x >> g->s[0];
    x *= g->c[0];
    x ^= x >> g->s[1];
    x *= g->c[1];
    x ^= x >> g->s[2];
    return x;
}

/* Monte-Carlo estimate of the avalanche bias of g over 2^QUALITY random
 * inputs: for every input bit flip, tally how often each output bit flips,
 * and return the RMS deviation from the ideal 50%, scaled by 1000. */
static double
estimate_bias32(const struct gene *g, uint64_t rng[4])
{
    long n = 1L << QUALITY;
    long bins[32][32] = {{0}};
    for (long i = 0; i < n; i++) {
        uint32_t x = rand64(rng);
        uint32_t h0 = hash(g, x);
        for (int j = 0; j < 32; j++) {
            uint32_t bit = UINT32_C(1) << j;
            uint32_t h1 = hash(g, x ^ bit);
            uint32_t set = h0 ^ h1;
            for (int k = 0; k < 32; k++)
                bins[j][k] += (set >> k) & 1;
        }
    }
    double mean = 0;
    for (int j = 0; j < 32; j++) {
        for (int k = 0; k < 32; k++) {
            /* difference from the ideal n/2 flip count */
            double diff = (bins[j][k] - n / 2) / (n / 2.0);
            mean += (diff * diff) / (32 * 32);
        }
    }
    return sqrt(mean) * 1000.0;
}

#define EXACT_SPLIT 32 // must be power of two

/* Exhaustive bias over the full 2^32 input space, parallelized by splitting
 * the input range into EXACT_SPLIT chunks with per-chunk tallies merged
 * under a critical section. */
static double
exact_bias32(const struct gene *g)
{
    long long bins[32][32] = {{0}};
    static const uint64_t range = (UINT64_C(1) << 32) / EXACT_SPLIT;
    #pragma omp parallel for
    for (int i = 0; i < EXACT_SPLIT; i++) {
        long long b[32][32] = {{0}};
        for (uint64_t x = i * range; x < (i + 1) * range; x++) {
            uint32_t h0 = hash(g, x);
            for (int j = 0; j < 32; j++) {
                uint32_t bit = UINT32_C(1) << j;
                uint32_t h1 = hash(g, x ^ bit);
                uint32_t set = h0 ^ h1;
                for (int k = 0; k < 32; k++)
                    b[j][k] += (set >> k) & 1;
            }
        }
        #pragma omp critical
        for (int j = 0; j < 32; j++)
            for (int k = 0; k < 32; k++)
                bins[j][k] += b[j][k];
    }
    double mean = 0.0;
    for (int j = 0; j < 32; j++) {
        for (int k = 0; k < 32; k++) {
            /* ideal flip count is 2^31 per (input bit, output bit) pair */
            double diff = (bins[j][k] - 2147483648L) / 2147483648.0;
            mean += (diff * diff) / (32 * 32);
        }
    }
    return sqrt(mean) * 1000.0;
}

/* Fresh random candidate: shifts in [10,19], multipliers forced odd. */
static void
gene_gen(struct gene *g, uint64_t rng[4])
{
    uint64_t s = rand64(rng);
    uint64_t c = rand64(rng);
    g->s[0] = 10 + (s >>  0) % 10;
    g->s[1] = 10 + (s >> 24) % 10;
    g->s[2] = 10 + (s >> 48) % 10;
    g->c[0] = c | 1u;
    g->c[1] = (c >> 32) | 1u;
    g->flags = 0;
}

/* Print a candidate as "[s0 c0 s1 c1 s2]". */
static void
gene_print(const struct gene *g, FILE *f)
{
    fprintf(f, "[%2d %08lx %2d %08lx %2d]",
            g->s[0], (unsigned long)g->c[0],
            g->s[1], (unsigned long)g->c[1], g->s[2]);
}

/* Small non-zero perturbation in [-3,-1] U [+1,+3]. */
static int
small(uint64_t r)
{
    static const int v[] = {-3, -2, -1, +1, +2, +3};
    return v[r % 6];
}

/* Mutate one of the five parameters in place and clear the score flags. */
static void
gene_mutate(struct gene *g, uint64_t rng[4])
{
    uint64_t r = rand64(rng);
    int s = r % 5;
    r >>= 3;
    switch (s) {
        case 0: g->s[0] += small(r); break;
        case 1: g->s[1] += small(r); break;
        case 2: g->s[2] += small(r); break;
        case 3: g->c[0] += (int)(r & 0xffff) - 32768; break;
        case 4: g->c[1] += (int)(r & 0xffff) - 32768; break;
    }
    g->flags = 0;
}

/* Create a child by copying a and splicing in b's tail starting at a random
 * cut point (the cases deliberately fall through).
 * BUG FIX: the switch previously selected on (r & 2), which can only ever
 * be 0 or 2, so cut points 1 and 3 were unreachable; (r & 3) restores the
 * intended uniform choice over all four cut points. */
static void
gene_cross(struct gene *g,
           const struct gene *a,
           const struct gene *b,
           uint64_t rng[4])
{
    uint64_t r = rand64(rng);
    *g = *a;
    switch (r & 3) {
        case 0: g->c[0] = b->c[0]; /* FALLTHROUGH */
        case 1: g->s[1] = b->s[1]; /* FALLTHROUGH */
        case 2: g->c[1] = b->c[1]; /* FALLTHROUGH */
        case 3: g->s[2] = b->s[2];
    }
    g->flags = 0;
}

/* Two genes are the same iff all five hash parameters match. */
static int
gene_same(const struct gene *a, const struct gene *b)
{
    return a->s[0] == b->s[0] &&
           a->s[1] == b->s[1] &&
           a->s[2] == b->s[2] &&
           a->c[0] == b->c[0] &&
           a->c[1] == b->c[1];
}

/* Seed `len` bytes of PRNG state from the system entropy source. */
static void
rng_init(void *p, size_t len)
{
    FILE *f = fopen("/dev/urandom", "rb");
    if (!f) abort();
    if (!fread(p, 1, len, f)) abort();
    fclose(f);
}

/* qsort comparator: ascending by score (lower bias is better).
 * Reads the leading double directly, relying on score being first. */
static int
cmp(const void *pa, const void *pb)
{
    double a = *(double *)pa;
    double b = *(double *)pb;
    if (a < b) return -1;
    if (b < a) return 1;
    return 0;
}

/* Mutate away exact duplicates so the pool keeps POOL distinct candidates. */
static void
undup(struct gene *pool, uint64_t rng[4])
{
    for (int i = 0; i < POOL; i++)
        for (int j = i + 1; j < POOL; j++)
            if (gene_same(pool + i, pool + j))
                gene_mutate(pool + j, rng);
}

/* Evolution loop: score the pool (cheap estimate, then exact for promising
 * candidates), report new sub-DONTCARE tuples, reset the pool after
 * RESETMINS without progress, and refill the bottom 3/4 of the pool by
 * crossing the top quarter.  Runs forever. */
int
main(void)
{
    int verbose = 1;
    double best = 1000.0;
    time_t best_time = time(0);

    uint64_t rng[POOL][4];
    struct gene pool[POOL];
    rng_init(rng, sizeof(rng));
    for (int i = 0; i < POOL; i++)
        gene_gen(pool + i, rng[0]);

    for (;;) {
        /* Score in parallel; each gene uses its own PRNG stream. */
        #pragma omp parallel for schedule(dynamic)
        for (int i = 0; i < POOL; i++) {
            if (!(pool[i].flags & FLAG_SCORED)) {
                pool[i].score = estimate_bias32(pool + i, rng[i]);
                pool[i].flags |= FLAG_SCORED;
            }
        }
        /* Promote promising estimates to exhaustive measurements. */
        for (int i = 0; i < POOL; i++) {
            if (!(pool[i].flags & FLAG_EXACT) && pool[i].score < THRESHOLD) {
                pool[i].score = exact_bias32(pool + i);
                pool[i].flags |= FLAG_EXACT;
            }
        }
        qsort(pool, POOL, sizeof(*pool), cmp);
        if (verbose) {
            for (int i = 0; i < POOL; i++) {
                if (!(pool[i].flags & FLAG_PRINTED) &&
                    pool[i].score < DONTCARE) {
                    gene_print(pool + i, stdout);
                    printf(" = %.17g\n", pool[i].score);
                    pool[i].flags |= FLAG_PRINTED;
                }
            }
        }

        time_t now = time(0);
        if (pool[0].score < best) {
            best = pool[0].score;
            best_time = now;
        } else if (now - best_time > RESETMINS * 60) {
            /* Stuck: restart from a fresh random pool. */
            best = 1000.0;
            best_time = now;
            for (int i = 0; i < POOL; i++)
                gene_gen(pool + i, rng[0]);
        }

        /* Replace the bottom 3/4 with crossings of the top quarter. */
        int c = POOL / 4;
        for (int a = 0; c < POOL && a < POOL / 4; a++)
            for (int b = a + 1; c < POOL && b < POOL / 4; b++)
                gene_cross(pool + c++, pool + a, pool + b, rng[0]);
        undup(pool, rng[0]);
    }
}
queens.c
#include "queens.h"

/*
 * MPI master/worker N-queens counter.  Rank 0 distributes every valid
 * placement of queens in the first two rows; workers count completions of
 * each partial placement and report a partial sum when told to finish.
 *
 * BUG FIXES vs. the previous revision:
 *  - the bad-argument message had a %s conversion with no matching
 *    argument (undefined behavior); argv[1] is now passed;
 *  - the summary printf passed (tm, res) where the format string expects
 *    "%d variants ... %f" — the int result and double time were swapped.
 */
int main(int argc, char* argv[]) {
    int rank, size;
    MPI_Status stat;
    char* tracefile;
    double tm;
    int node, wfinish = FINISH_WORK;
    int res, tres;
    int i, j, m, m2;
#ifdef OMP_MODE
    int th, limit;
#endif
    if (argc != 2)
        printf("M parameter forgotten\n");
    else if (sscanf(argv[1], "%d", &m) != 1)
        /* FIX: supply the argument required by the %s conversion */
        printf("Argument %s has wrong format. Integer value expected.\n", argv[1]);
    else {
        m2 = m * m;
        MPI_Init(&argc, &argv);

        /* Optional Vampir/TotalView tracing controls. */
        tracefile = getenv("TVTRACE");
        if (tracefile != NULL)
            MPI_Pcontrol(TRACEFILES, NULL, tracefile, 0);
        else
            MPI_Pcontrol(TRACEFILES, NULL, "trace", 0);
        MPI_Pcontrol(TRACELEVEL, 1, 1, 1);
        MPI_Pcontrol(TRACENODE, 1000000, 1, 1);

        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        if (rank == 0) {
            /* --- master: hand out (row0,row1) column pairs on demand --- */
            int** field = field_init(m);
#ifdef OMP_MODE
            /* NOTE(review): uses the master's thread count to size the
             * shutdown loop — assumes all workers run the same number of
             * OpenMP threads; confirm for heterogeneous setups. */
            th = omp_get_max_threads();
            limit = (size - 1) * th;
#endif
            TIMER_START;
            for (i = 0; i < m; i++)
                for (j = 0; j < m; j++)
                    if (!is_attacked(0, i, 1, j)) {
                        int pos = make_double_pos(i, j);
                        MPI_Recv(&node, 1, MPI_INT, MPI_ANY_SOURCE,
                                 WANT_TO_WORK, MPI_COMM_WORLD, &stat);
                        MPI_Send(&pos, 1, MPI_INT, node, COL_NUMBER,
                                 MPI_COMM_WORLD);
                    }

            /* Tell every worker (thread) to finish and collect sums. */
            res = 0;
#ifdef OMP_MODE
            for (i = 0; i < limit; i++)
#else
            for (i = 1; i < size; i++)
#endif
            {
                MPI_Recv(&node, 1, MPI_INT, MPI_ANY_SOURCE, WANT_TO_WORK,
                         MPI_COMM_WORLD, &stat);
                MPI_Send(&wfinish, 1, MPI_INT, node, COL_NUMBER,
                         MPI_COMM_WORLD);
                MPI_Recv(&tres, 1, MPI_INT, node, RESULT_PACK,
                         MPI_COMM_WORLD, &stat);
                res += tres;
            }
            tm = TIMER_GET;
            /* FIX: arguments were (tm, res); format expects int then double */
            printf("Nodes involved: %d\nM = %d\nResult: %d variants\nTotal time: %f\n",
                   size, m, res, tm);
            field_clear(field);
        }
        else {
            /* --- worker: request work until told to finish --- */
#ifdef OMP_MODE
#pragma omp parallel private(i, res, stat) shared(rank, m, m2)
#endif
            {
                res = 0;
                int** field = field_init(m);
                do {
                    /* NOTE(review): the request and the matching reply are
                     * protected by two separate critical sections; with
                     * several threads per rank the reply for one thread
                     * could be consumed by another — confirm the tag/rank
                     * scheme makes this safe. */
#ifdef OMP_MODE
#pragma omp critical
#endif
                    {
                        MPI_Send(&rank, 1, MPI_INT, 0, WANT_TO_WORK,
                                 MPI_COMM_WORLD);
                    }
#ifdef OMP_MODE
#pragma omp critical
#endif
                    {
                        MPI_Recv(&i, 1, MPI_INT, 0, COL_NUMBER,
                                 MPI_COMM_WORLD, &stat);
                    }
                    if (i == FINISH_WORK) {
#ifdef OMP_MODE
#pragma omp critical
#endif
                        {
                            MPI_Send(&res, 1, MPI_INT, 0, RESULT_PACK,
                                     MPI_COMM_WORLD);
                        }
                    }
                    else {
                        memset(*field, 0, sizeof(int) * m2);
                        int col1 = (i >> 16) & 0xFFFF;
                        int col2 = i & 0xFFFF;
                        res += count_variants_top(field, 0, col1, 1, col2, m);
                    }
                } while (i != FINISH_WORK);
                field_clear(field);
            }
        }
        MPI_Finalize();
    }
    return 0;
}

/* Allocate an m x m attack-count board as one contiguous buffer with a
 * row-pointer index; free with field_clear(). */
int** field_init(int m) {
    int i;
    int size = m * m;
    int** field = (int**)calloc(m, sizeof(int*));
    int* buf = (int*)calloc(size, sizeof(int));
    for (i = 0; i < m; i++)
        field[i] = &buf[i * m];
    return field;
}

/* Release a board created by field_init(). */
void field_clear(int** field) {
    free(*field);
    free(field);
}

/* Place the two seed queens, count all completions from row1+1 on, then
 * restore the board (removal in reverse order of placement). */
int count_variants_top(int** field, int row1, int col1, int row2, int col2, int m) {
    int res;
    queens_put(field, row1, col1, m);
    queens_put(field, row2, col2, m);
    res = count_variants(field, row1 + 2, m);
    queens_remove(field, row2, col2, m);
    queens_remove(field, row1, col1, m);
    return res;
}

/* Recursively count placements for rows [row, m).  A complete board
 * counts as 2 — presumably each solution is reached under a symmetry of
 * the seeding scheme; confirm against the master's distribution loop. */
int count_variants(int** field, int row, int m) {
    int j, res;
    if (row == m)
        res = 2;
    else {
        res = 0;
        for (j = 0; j < m; j++) {
            if (field[row][j] == 0) {
                queens_put(field, row, j, m);
                res += count_variants(field, row + 1, m);
                queens_remove(field, row, j, m);
            }
        }
    }
    return res;
}

/* Mark every square attacked by a queen at (i, j): its row, column, and
 * both diagonals.  Marks are counted, so overlapping queens stack. */
void queens_put(int** field, int i, int j, int m) {
    int k;
    int k1 = m - MAX(i, j), k2 = MIN(i, m - 1 - j);
    // Row and column
    for (k = 0; k < m; k++) {
        MARK(field, i, k);
        MARK(field, k, j);
    }
    // Main diagonal
    for (k = -MIN(i, j); k < k1; k++)
        MARK(field, i + k, j + k);
    // Anti-diagonal
    for (k = -MIN(m - 1 - i, j); k <= k2; k++)
        MARK(field, i - k, j + k);
}

/* Exact inverse of queens_put(): decrement the same set of squares. */
void queens_remove(int** field, int i, int j, int m) {
    int k;
    int k1 = m - MAX(i, j), k2 = MIN(i, m - 1 - j);
    // Row and column
    for (k = 0; k < m; k++) {
        UMARK(field, i, k);
        UMARK(field, k, j);
    }
    // Main diagonal
    for (k = -MIN(i, j); k < k1; k++)
        UMARK(field, i + k, j + k);
    // Anti-diagonal
    for (k = -MIN(m - 1 - i, j); k <= k2; k++)
        UMARK(field, i - k, j + k);
}

/* True iff the two squares are adjacent (king-move distance <= 1);
 * used to prune seed pairs on adjacent rows. */
int is_attacked(int row1, int col1, int row2, int col2) {
    return (ABS(row1 - row2) <= 1) && (ABS(col1 - col2) <= 1);
}

/* Pack two column indices into one int (16 bits each). */
int make_double_pos(int col1, int col2) {
    return (col1 << 16) | col2;
}

/* Unpack a value produced by make_double_pos(). */
void unmake_double_pos(int value, int* col1, int* col2) {
    *col1 = (value >> 16) & 0xFFFF;
    *col2 = value & 0xFFFF;
}
zlaset.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"

/******************************************************************************/
// Sets the elements of an m-by-n matrix A to beta on the diagonal and alpha
// elsewhere, restricted to the part selected by uplo (general/upper/lower).
// LAPACK-layout entry point: converts to tile layout, runs the async tile
// routine, converts back.  Returns PlasmaSuccess or a negative value
// indicating the offending argument.
int plasma_zlaset(plasma_enum_t uplo,
                  int m, int n,
                  plasma_complex64_t alpha, plasma_complex64_t beta,
                  plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaGeneral) &&
        (uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (imin(n, m) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        // FIX: message previously named a non-existent function
        // ("plasma_general_desc_create").
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // FIX: the tile descriptor A was leaked on this early-exit path.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);

        // Call tile async function.
        plasma_omp_zlaset(uplo, alpha, beta, A, sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}

/******************************************************************************/
// Asynchronous tile-layout version of zlaset.  Errors are reported through
// the sequence/request pair rather than a return value; on argument errors
// the sequence is marked failed and the call returns immediately.
void plasma_omp_zlaset(plasma_enum_t uplo,
                       plasma_complex64_t alpha, plasma_complex64_t beta,
                       plasma_desc_t A,
                       plasma_sequence_t *sequence,
                       plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaGeneral) &&
        (uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function.
    plasma_pzlaset(uplo, alpha, beta, A, sequence, request);
}
choleskies_cython.c
/* Generated by Cython 0.22 */ #define PY_SSIZE_T_CLEAN #ifndef CYTHON_USE_PYLONG_INTERNALS #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 0 #else #include "pyconfig.h" #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 1 #else #define CYTHON_USE_PYLONG_INTERNALS 0 #endif #endif #endif #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. #else #define CYTHON_ABI "0_22" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, 
lnos) #define __Pyx_DefaultClassType PyType_Type #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE) #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #define __Pyx_PyFrozenSet_Size(s) PyObject_Size(s) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #define __Pyx_PyFrozenSet_Size(s) PySet_Size(s) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define 
__Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is a quiet NaN. 
*/ float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #define __Pyx_void_to_None(void_result) (void_result, Py_INCREF(Py_None), Py_None) #ifdef __cplusplus template<typename T> void __Pyx_call_destructor(T* x) { x->~T(); } template<typename T> class __Pyx_FakeReference { public: __Pyx_FakeReference() : ptr(NULL) { } __Pyx_FakeReference(T& ref) : ptr(&ref) { } T *operator->() { return ptr; } operator T&() { return *ptr; } private: T *ptr; }; #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__GPy__util__choleskies_cython #define __PYX_HAVE_API__GPy__util__choleskies_cython #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "pythread.h" #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \ (sizeof(type) < sizeof(Py_ssize_t)) || \ (sizeof(type) > sizeof(Py_ssize_t) && \ likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX) && \ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \ v == (type)PY_SSIZE_T_MIN))) || \ (sizeof(type) == sizeof(Py_ssize_t) && \ (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define 
__Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if 
__PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "GPy/util/choleskies_cython.pyx", "__init__.pxd", "GPy/util/stringsource", "type.pxd", }; struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj 
*memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 || \ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) && \ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && MSC_VER #include <Windows.h> #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using MSVC atomics" #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef 
__PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview) \ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview) \ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview) \ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview) \ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726 * # in Cython to enable them only on the right systems. * * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":729 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 
uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":736 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":741 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* 
"../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* 
"../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "scipy/linalg/cython_blas.pxd":15 * # The original libraries should be linked directly. * * ctypedef float s # <<<<<<<<<<<<<< * ctypedef double d * ctypedef float complex c */ typedef float __pyx_t_5scipy_6linalg_11cython_blas_s; /* "scipy/linalg/cython_blas.pxd":16 * * ctypedef float s * ctypedef double d # <<<<<<<<<<<<<< * ctypedef float complex c * ctypedef double complex z */ typedef double __pyx_t_5scipy_6linalg_11cython_blas_d; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765 * ctypedef 
npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":767 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* "View.MemoryView":99 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":269 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":302 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int 
*acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":921 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":302 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":921 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* 
(*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do { \ 
PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_XDECREF(tmp); \ } while (0) #define __Pyx_DECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_DECREF(tmp); \ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif static PyObject *__Pyx_GetBuiltinName(PyObject *name); static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static 
CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE long __Pyx_div_long(long, long); /* proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback); static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); #if PY_MAJOR_VERSION >= 3 static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = 
PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); #include <string.h> static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* proto */ #define UNARY_NEG_WOULD_OVERFLOW(x) (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb); static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb); static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, 
PyObject **tb); #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if 
(likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/ static int __Pyx_SetVtable(PyObject *dict, void *vtable); typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static 
PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); static PyObject *__pyx_memview_get_double(const char *itemp); static int __pyx_memview_set_double(const char *itemp, PyObject *obj); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define 
__Pyx_CIMAG(z) ((z).imag) #endif #if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex 
__pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs, char order, int ndim); static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); static CYTHON_INLINE 
PyObject *__pyx_capsule_create(void *p, const char *sig); static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); 
/* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'scipy.linalg.cython_blas' */ static __pyx_t_5scipy_6linalg_11cython_blas_d (*__pyx_f_5scipy_6linalg_11cython_blas_ddot)(int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *); /*proto*/ static void (*__pyx_f_5scipy_6linalg_11cython_blas_dscal)(int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *); /*proto*/ static void (*__pyx_f_5scipy_6linalg_11cython_blas_dsymv)(char *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *); /*proto*/ /* Module declarations from 
'GPy.util.choleskies_cython' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static void __pyx_f_3GPy_4util_17choleskies_cython_chol_backprop(int, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); 
/*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "GPy.util.choleskies_cython" int __pyx_module_is_main_GPy__util__choleskies_cython = 0; /* Implementation of 'GPy.util.choleskies_cython' */ static PyObject *__pyx_builtin_range; static PyObject 
*__pyx_builtin_xrange; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_flat_to_triang(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_flat, int __pyx_v_M); /* proto */ static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_2triang_to_flat(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_L); /* proto */ static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_4backprop_gradient(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L); /* proto */ static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_6backprop_gradient_par(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L); /* proto */ static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_8backprop_gradient_par_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void 
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct 
__pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static 
PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static char __pyx_k_B[] = "B"; static char __pyx_k_D[] = "D"; static char __pyx_k_H[] = "H"; static char __pyx_k_I[] = "I"; static char __pyx_k_L[] = "L"; static char __pyx_k_M[] = "M"; static char __pyx_k_N[] = "N"; static char __pyx_k_O[] = "O"; static char __pyx_k_Q[] = "Q"; static char __pyx_k_b[] = "b"; static char __pyx_k_c[] = "c"; static char __pyx_k_d[] = "d"; static char __pyx_k_f[] = "f"; static char __pyx_k_g[] = "g"; static char __pyx_k_h[] = "h"; static char __pyx_k_i[] = "i"; static char __pyx_k_j[] = "j"; static char __pyx_k_k[] = "k"; static char __pyx_k_l[] = "l"; static char __pyx_k_m[] = "m"; static char __pyx_k_q[] = "q"; static char __pyx_k_u[] = "u"; static char __pyx_k_Zd[] = "Zd"; static char __pyx_k_Zf[] = "Zf"; static char __pyx_k_Zg[] = "Zg"; static char __pyx_k_dL[] = "dL"; static char __pyx_k_id[] = "id"; static char __pyx_k_mm[] = "mm"; static char __pyx_k_np[] = "np"; static char __pyx_k_obj[] = "obj"; static char __pyx_k_ret[] = "ret"; static char __pyx_k_base[] = "base"; static char __pyx_k_flat[] = "flat"; static char __pyx_k_main[] = "__main__"; static char __pyx_k_mode[] = "mode"; static char __pyx_k_name[] = "name"; static char 
__pyx_k_ndim[] = "ndim"; static char __pyx_k_pack[] = "pack"; static char __pyx_k_size[] = "size"; static char __pyx_k_step[] = "step"; static char __pyx_k_stop[] = "stop"; static char __pyx_k_test[] = "__test__"; static char __pyx_k_tril[] = "tril"; static char __pyx_k_class[] = "__class__"; static char __pyx_k_count[] = "count"; static char __pyx_k_dL_dK[] = "dL_dK"; static char __pyx_k_empty[] = "empty"; static char __pyx_k_error[] = "error"; static char __pyx_k_flags[] = "flags"; static char __pyx_k_numpy[] = "numpy"; static char __pyx_k_range[] = "range"; static char __pyx_k_shape[] = "shape"; static char __pyx_k_start[] = "start"; static char __pyx_k_zeros[] = "zeros"; static char __pyx_k_L_cont[] = "L_cont"; static char __pyx_k_format[] = "format"; static char __pyx_k_import[] = "__import__"; static char __pyx_k_name_2[] = "__name__"; static char __pyx_k_struct[] = "struct"; static char __pyx_k_unpack[] = "unpack"; static char __pyx_k_xrange[] = "xrange"; static char __pyx_k_asarray[] = "asarray"; static char __pyx_k_fortran[] = "fortran"; static char __pyx_k_memview[] = "memview"; static char __pyx_k_Ellipsis[] = "Ellipsis"; static char __pyx_k_itemsize[] = "itemsize"; static char __pyx_k_TypeError[] = "TypeError"; static char __pyx_k_enumerate[] = "enumerate"; static char __pyx_k_IndexError[] = "IndexError"; static char __pyx_k_ValueError[] = "ValueError"; static char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static char __pyx_k_MemoryError[] = "MemoryError"; static char __pyx_k_RuntimeError[] = "RuntimeError"; static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static char __pyx_k_flat_to_triang[] = "flat_to_triang"; static char __pyx_k_triang_to_flat[] = "triang_to_flat"; static char __pyx_k_allocate_buffer[] = "allocate_buffer"; static char __pyx_k_dtype_is_object[] = "dtype_is_object"; static char __pyx_k_ascontiguousarray[] = "ascontiguousarray"; static char __pyx_k_backprop_gradient[] = "backprop_gradient"; static char 
__pyx_k_strided_and_direct[] = "<strided and direct>"; static char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static char __pyx_k_backprop_gradient_par[] = "backprop_gradient_par"; static char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static char __pyx_k_backprop_gradient_par_c[] = "backprop_gradient_par_c"; static char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static char __pyx_k_getbuffer_obj_view_flags[] = "getbuffer(obj, view, flags)"; static char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct"; static char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static char __pyx_k_GPy_util_choleskies_cython[] = "GPy.util.choleskies_cython"; static char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)"; static char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)"; static char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static char __pyx_k_home_james_work_GPy_GPy_util_ch[] = "/home/james/work/GPy/GPy/util/choleskies_cython.pyx"; static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced"; static char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only 
create a buffer that is contiguous in memory."; static char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions"; static char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_D; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_n_s_GPy_util_choleskies_cython; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject 
*__pyx_n_s_L; static PyObject *__pyx_n_s_L_cont; static PyObject *__pyx_n_s_M; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_s_N; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_asarray; static PyObject *__pyx_n_s_ascontiguousarray; static PyObject *__pyx_n_s_backprop_gradient; static PyObject *__pyx_n_s_backprop_gradient_par; static PyObject *__pyx_n_s_backprop_gradient_par_c; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_count; static PyObject *__pyx_n_s_d; static PyObject *__pyx_n_s_dL; static PyObject *__pyx_n_s_dL_dK; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_empty; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_flat; static PyObject *__pyx_n_s_flat_to_triang; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_kp_s_home_james_work_GPy_GPy_util_ch; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_k; static PyObject *__pyx_n_s_m; static 
PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mm; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_ret; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_triang_to_flat; static PyObject *__pyx_n_s_tril; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_xrange; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__15; static PyObject *__pyx_slice__16; static PyObject *__pyx_slice__17; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject 
*__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__27; static PyObject *__pyx_tuple__29; static PyObject *__pyx_tuple__30; static PyObject *__pyx_tuple__31; static PyObject *__pyx_tuple__32; static PyObject *__pyx_tuple__33; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__22; static PyObject *__pyx_codeobj__24; static PyObject *__pyx_codeobj__26; static PyObject *__pyx_codeobj__28; /* "GPy/util/choleskies_cython.pyx":12 * cimport scipy.linalg.cython_blas as cblas * * def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<< * """take a matrix N x D and return a D X M x M array where * */ /* Python wrapper */ static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_1flat_to_triang(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_3GPy_4util_17choleskies_cython_flat_to_triang[] = "take a matrix N x D and return a D X M x M array where\n\n N = M(M+1)/2\n\n the lower triangluar portion of the d'th slice of the result is filled by the d'th column of flat.\n "; static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_1flat_to_triang = {"flat_to_triang", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_1flat_to_triang, METH_VARARGS|METH_KEYWORDS, __pyx_doc_3GPy_4util_17choleskies_cython_flat_to_triang}; static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_1flat_to_triang(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_flat = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_M; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("flat_to_triang (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_flat,&__pyx_n_s_M,0}; PyObject* 
values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flat)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_M)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("flat_to_triang", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "flat_to_triang") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_flat = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_flat.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_M = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_M == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("flat_to_triang", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("GPy.util.choleskies_cython.flat_to_triang", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; 
__pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_flat_to_triang(__pyx_self, __pyx_v_flat, __pyx_v_M); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_flat_to_triang(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_flat, int __pyx_v_M) { int __pyx_v_D; CYTHON_UNUSED int __pyx_v_N; int __pyx_v_count; __Pyx_memviewslice __pyx_v_ret = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_d; int __pyx_v_m; int __pyx_v_mm; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; __Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; long __pyx_t_12; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("flat_to_triang", 0); /* "GPy/util/choleskies_cython.pyx":19 * the lower triangluar portion of the d'th slice of the result is filled by the d'th column of flat. 
* """ * cdef int D = flat.shape[1] # <<<<<<<<<<<<<< * cdef int N = flat.shape[0] * cdef int count = 0 */ __pyx_v_D = (__pyx_v_flat.shape[1]); /* "GPy/util/choleskies_cython.pyx":20 * """ * cdef int D = flat.shape[1] * cdef int N = flat.shape[0] # <<<<<<<<<<<<<< * cdef int count = 0 * cdef double[:, :, ::1] ret = np.zeros((D, M, M)) */ __pyx_v_N = (__pyx_v_flat.shape[0]); /* "GPy/util/choleskies_cython.pyx":21 * cdef int D = flat.shape[1] * cdef int N = flat.shape[0] * cdef int count = 0 # <<<<<<<<<<<<<< * cdef double[:, :, ::1] ret = np.zeros((D, M, M)) * cdef int d, m, mm */ __pyx_v_count = 0; /* "GPy/util/choleskies_cython.pyx":22 * cdef int N = flat.shape[0] * cdef int count = 0 * cdef double[:, :, ::1] ret = np.zeros((D, M, M)) # <<<<<<<<<<<<<< * cdef int d, m, mm * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_D); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_5) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else { __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = NULL; PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(__pyx_t_1); if (unlikely(!__pyx_t_7.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_ret = __pyx_t_7; __pyx_t_7.memview = NULL; __pyx_t_7.data = NULL; /* "GPy/util/choleskies_cython.pyx":24 * cdef double[:, :, ::1] ret = np.zeros((D, M, M)) * cdef int d, m, mm * with nogil: # <<<<<<<<<<<<<< * for d in range(D): * count = 0 */ { 
#ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "GPy/util/choleskies_cython.pyx":25 * cdef int d, m, mm * with nogil: * for d in range(D): # <<<<<<<<<<<<<< * count = 0 * for m in range(M): */ __pyx_t_8 = __pyx_v_D; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_d = __pyx_t_9; /* "GPy/util/choleskies_cython.pyx":26 * with nogil: * for d in range(D): * count = 0 # <<<<<<<<<<<<<< * for m in range(M): * for mm in range(m+1): */ __pyx_v_count = 0; /* "GPy/util/choleskies_cython.pyx":27 * for d in range(D): * count = 0 * for m in range(M): # <<<<<<<<<<<<<< * for mm in range(m+1): * ret[d, m, mm] = flat[count,d] */ __pyx_t_10 = __pyx_v_M; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v_m = __pyx_t_11; /* "GPy/util/choleskies_cython.pyx":28 * count = 0 * for m in range(M): * for mm in range(m+1): # <<<<<<<<<<<<<< * ret[d, m, mm] = flat[count,d] * count += 1 */ __pyx_t_12 = (__pyx_v_m + 1); for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_mm = __pyx_t_13; /* "GPy/util/choleskies_cython.pyx":29 * for m in range(M): * for mm in range(m+1): * ret[d, m, mm] = flat[count,d] # <<<<<<<<<<<<<< * count += 1 * return ret */ __pyx_t_14 = __pyx_v_count; __pyx_t_15 = __pyx_v_d; if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_flat.shape[0]; if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_flat.shape[1]; __pyx_t_16 = __pyx_v_d; __pyx_t_17 = __pyx_v_m; __pyx_t_18 = __pyx_v_mm; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_ret.shape[0]; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_ret.shape[1]; if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_ret.shape[2]; *((double *) ( /* dim=2 */ ((char *) (((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_ret.data + __pyx_t_16 * __pyx_v_ret.strides[0]) ) + __pyx_t_17 * __pyx_v_ret.strides[1]) )) + __pyx_t_18)) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_flat.data + __pyx_t_14 * __pyx_v_flat.strides[0]) ) + __pyx_t_15 * __pyx_v_flat.strides[1]) ))); /* 
"GPy/util/choleskies_cython.pyx":30 * for mm in range(m+1): * ret[d, m, mm] = flat[count,d] * count += 1 # <<<<<<<<<<<<<< * return ret * */ __pyx_v_count = (__pyx_v_count + 1); } } } } /* "GPy/util/choleskies_cython.pyx":24 * cdef double[:, :, ::1] ret = np.zeros((D, M, M)) * cdef int d, m, mm * with nogil: # <<<<<<<<<<<<<< * for d in range(D): * count = 0 */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "GPy/util/choleskies_cython.pyx":31 * ret[d, m, mm] = flat[count,d] * count += 1 * return ret # <<<<<<<<<<<<<< * * def triang_to_flat(double[:, :, :] L): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_ret, 3, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "GPy/util/choleskies_cython.pyx":12 * cimport scipy.linalg.cython_blas as cblas * * def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<< * """take a matrix N x D and return a D X M x M array where * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __PYX_XDEC_MEMVIEW(&__pyx_t_7, 1); __Pyx_AddTraceback("GPy.util.choleskies_cython.flat_to_triang", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_ret, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_flat, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "GPy/util/choleskies_cython.pyx":33 * return ret * * def triang_to_flat(double[:, :, :] L): # <<<<<<<<<<<<<< * cdef int D = L.shape[0] * cdef int M = L.shape[1] */ /* Python wrapper */ static PyObject 
/* NOTE(review): Cython-GENERATED code for GPy.util.choleskies_cython.triang_to_flat. */
/* Do not hand-edit; change the .pyx source and regenerate instead. */
/* Wrapper: unpacks the single Python argument into a 3-D double memoryview (strided */
/* in all dims) and dispatches to the __pyx_pf_ implementation below. */
*__pyx_pw_3GPy_4util_17choleskies_cython_3triang_to_flat(PyObject *__pyx_self, PyObject *__pyx_arg_L); /*proto*/ static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_3triang_to_flat = {"triang_to_flat", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_3triang_to_flat, METH_O, 0}; static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_3triang_to_flat(PyObject *__pyx_self, PyObject *__pyx_arg_L) { __Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("triang_to_flat (wrapper)", 0); assert(__pyx_arg_L); { __pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(__pyx_arg_L); if (unlikely(!__pyx_v_L.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("GPy.util.choleskies_cython.triang_to_flat", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_2triang_to_flat(__pyx_self, __pyx_v_L); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation: for each slice d of the (D, M, M) input L, scans the lower */
/* triangle row-major (m in [0, M), mm in [0, m]) and writes L[d, m, mm] into */
/* flat[count, d], where flat is a freshly np.empty((M*(M+1)/2, D)) C-contiguous */
/* array; the scan runs with the GIL released (the .pyx "with nogil:" block). */
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_2triang_to_flat(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_L) { int __pyx_v_D; int __pyx_v_M; int __pyx_v_N; int __pyx_v_count; __Pyx_memviewslice __pyx_v_flat = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_d; int __pyx_v_m; int __pyx_v_mm; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; 
int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("triang_to_flat", 0); /* "GPy/util/choleskies_cython.pyx":34 * * def triang_to_flat(double[:, :, :] L): * cdef int D = L.shape[0] # <<<<<<<<<<<<<< * cdef int M = L.shape[1] * cdef int N = M*(M+1)/2 */ __pyx_v_D = (__pyx_v_L.shape[0]); /* "GPy/util/choleskies_cython.pyx":35 * def triang_to_flat(double[:, :, :] L): * cdef int D = L.shape[0] * cdef int M = L.shape[1] # <<<<<<<<<<<<<< * cdef int N = M*(M+1)/2 * cdef int count = 0 */ __pyx_v_M = (__pyx_v_L.shape[1]); /* "GPy/util/choleskies_cython.pyx":36 * cdef int D = L.shape[0] * cdef int M = L.shape[1] * cdef int N = M*(M+1)/2 # <<<<<<<<<<<<<< * cdef int count = 0 * cdef double[:, ::1] flat = np.empty((N, D)) */ __pyx_v_N = __Pyx_div_long((__pyx_v_M * (__pyx_v_M + 1)), 2); /* "GPy/util/choleskies_cython.pyx":37 * cdef int M = L.shape[1] * cdef int N = M*(M+1)/2 * cdef int count = 0 # <<<<<<<<<<<<<< * cdef double[:, ::1] flat = np.empty((N, D)) * cdef int d, m, mm */ __pyx_v_count = 0; /* "GPy/util/choleskies_cython.pyx":38 * cdef int N = M*(M+1)/2 * cdef int count = 0 * cdef double[:, ::1] flat = np.empty((N, D)) # <<<<<<<<<<<<<< * cdef int d, m, mm * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_N); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_D); if 
(unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_4) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); } else { __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL; PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1); if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_flat = __pyx_t_6; __pyx_t_6.memview = 
NULL; __pyx_t_6.data = NULL; /* "GPy/util/choleskies_cython.pyx":40 * cdef double[:, ::1] flat = np.empty((N, D)) * cdef int d, m, mm * with nogil: # <<<<<<<<<<<<<< * for d in range(D): * count = 0 */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "GPy/util/choleskies_cython.pyx":41 * cdef int d, m, mm * with nogil: * for d in range(D): # <<<<<<<<<<<<<< * count = 0 * for m in range(M): */ __pyx_t_7 = __pyx_v_D; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_d = __pyx_t_8; /* "GPy/util/choleskies_cython.pyx":42 * with nogil: * for d in range(D): * count = 0 # <<<<<<<<<<<<<< * for m in range(M): * for mm in range(m+1): */ __pyx_v_count = 0; /* "GPy/util/choleskies_cython.pyx":43 * for d in range(D): * count = 0 * for m in range(M): # <<<<<<<<<<<<<< * for mm in range(m+1): * flat[count,d] = L[d, m, mm] */ __pyx_t_9 = __pyx_v_M; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_m = __pyx_t_10; /* "GPy/util/choleskies_cython.pyx":44 * count = 0 * for m in range(M): * for mm in range(m+1): # <<<<<<<<<<<<<< * flat[count,d] = L[d, m, mm] * count += 1 */ __pyx_t_11 = (__pyx_v_m + 1); for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_mm = __pyx_t_12; /* "GPy/util/choleskies_cython.pyx":45 * for m in range(M): * for mm in range(m+1): * flat[count,d] = L[d, m, mm] # <<<<<<<<<<<<<< * count += 1 * return flat */ __pyx_t_13 = __pyx_v_d; __pyx_t_14 = __pyx_v_m; __pyx_t_15 = __pyx_v_mm; if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_v_L.shape[0]; if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_L.shape[1]; if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_L.shape[2]; __pyx_t_16 = __pyx_v_count; __pyx_t_17 = __pyx_v_d; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_flat.shape[0]; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_flat.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_flat.data + __pyx_t_16 * __pyx_v_flat.strides[0]) )) + __pyx_t_17)) )) = (*((double *) ( /* dim=2 */ (( /* 
dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_13 * __pyx_v_L.strides[0]) ) + __pyx_t_14 * __pyx_v_L.strides[1]) ) + __pyx_t_15 * __pyx_v_L.strides[2]) ))); /* "GPy/util/choleskies_cython.pyx":46 * for mm in range(m+1): * flat[count,d] = L[d, m, mm] * count += 1 # <<<<<<<<<<<<<< * return flat * */ __pyx_v_count = (__pyx_v_count + 1); } } } } /* "GPy/util/choleskies_cython.pyx":40 * cdef double[:, ::1] flat = np.empty((N, D)) * cdef int d, m, mm * with nogil: # <<<<<<<<<<<<<< * for d in range(D): * count = 0 */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "GPy/util/choleskies_cython.pyx":47 * flat[count,d] = L[d, m, mm] * count += 1 * return flat # <<<<<<<<<<<<<< * * def backprop_gradient(double[:, :] dL, double[:, :] L): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_flat, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "GPy/util/choleskies_cython.pyx":33 * return ret * * def triang_to_flat(double[:, :, :] L): # <<<<<<<<<<<<<< * cdef int D = L.shape[0] * cdef int M = L.shape[1] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("GPy.util.choleskies_cython.triang_to_flat", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_L, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_flat, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "GPy/util/choleskies_cython.pyx":49 * return flat * * def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, 
::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */
/* NOTE(review): Cython-GENERATED code for GPy.util.choleskies_cython.backprop_gradient. */
/* Do not hand-edit; change the .pyx source and regenerate instead. */
/* Wrapper: parses (dL, L) from args/kwargs into 2-D strided double memoryviews and */
/* dispatches to the __pyx_pf_ implementation below. */
 /* Python wrapper */ static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_5backprop_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_5backprop_gradient = {"backprop_gradient", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_5backprop_gradient, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_5backprop_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_dL = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_gradient (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dL,&__pyx_n_s_L,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dL)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_L)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_gradient", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_gradient") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if 
(PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_dL = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_dL.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1]); if (unlikely(!__pyx_v_L.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_gradient", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_4backprop_gradient(__pyx_self, __pyx_v_dL, __pyx_v_L); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation: starts from dL_dK = np.tril(dL) (a fresh C-contiguous copy, so */
/* the caller's dL is never mutated) and, with the GIL released, runs the reverse */
/* sweep quoted from the .pyx below (k from N-1 down to 0; subtract cross terms, */
/* divide column k by L[k,k], halve the diagonal term). The recurrences transform */
/* the gradient w.r.t. the Cholesky factor L into a gradient w.r.t. the factored */
/* matrix; statement order inside the k-loop is essential — do not reorder. */
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_4backprop_gradient(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) { __Pyx_memviewslice __pyx_v_dL_dK = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_N; int __pyx_v_k; int __pyx_v_j; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; int __pyx_t_19; int __pyx_t_20; int 
__pyx_t_21; int __pyx_t_22; int __pyx_t_23; int __pyx_t_24; int __pyx_t_25; int __pyx_t_26; int __pyx_t_27; int __pyx_t_28; int __pyx_t_29; int __pyx_t_30; int __pyx_t_31; int __pyx_t_32; int __pyx_t_33; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("backprop_gradient", 0); /* "GPy/util/choleskies_cython.pyx":50 * * def backprop_gradient(double[:, :] dL, double[:, :] L): * cdef double[:, ::1] dL_dK = np.tril(dL) # <<<<<<<<<<<<<< * cdef int N = L.shape[0] * cdef int k, j, i */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tril); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dL, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_4) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL; PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1); if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_dL_dK = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "GPy/util/choleskies_cython.pyx":51 * def backprop_gradient(double[:, :] dL, double[:, :] L): * cdef double[:, ::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] # <<<<<<<<<<<<<< * cdef int k, j, i * with nogil: */ __pyx_v_N = (__pyx_v_L.shape[0]); /* "GPy/util/choleskies_cython.pyx":53 * cdef int N = L.shape[0] * cdef int k, j, i * with nogil: # <<<<<<<<<<<<<< * for k in range(N - 1, -1, -1): * for j in range(k + 1, N): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "GPy/util/choleskies_cython.pyx":54 * cdef int k, j, i * with nogil: * for k in range(N - 1, -1, -1): # <<<<<<<<<<<<<< * for j in range(k + 1, N): * for i in range(j, N): */ for (__pyx_t_7 = (__pyx_v_N - 1); __pyx_t_7 > -1; __pyx_t_7-=1) { __pyx_v_k = __pyx_t_7; /* "GPy/util/choleskies_cython.pyx":55 * with nogil: * for k in range(N - 1, -1, -1): * for j in range(k + 1, N): # <<<<<<<<<<<<<< * for i in range(j, N): * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] */ __pyx_t_8 = __pyx_v_N; for (__pyx_t_9 = (__pyx_v_k + 1); __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_j = __pyx_t_9; /* "GPy/util/choleskies_cython.pyx":56 * for k in 
range(N - 1, -1, -1): * for j in range(k + 1, N): * for i in range(j, N): # <<<<<<<<<<<<<< * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] * dL_dK[j, k] -= dL_dK[i, j] * L[i, k] */ __pyx_t_10 = __pyx_v_N; for (__pyx_t_11 = __pyx_v_j; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "GPy/util/choleskies_cython.pyx":57 * for j in range(k + 1, N): * for i in range(j, N): * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] # <<<<<<<<<<<<<< * dL_dK[j, k] -= dL_dK[i, j] * L[i, k] * for j in range(k + 1, N): */ __pyx_t_12 = __pyx_v_i; __pyx_t_13 = __pyx_v_j; if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_v_dL_dK.shape[1]; __pyx_t_14 = __pyx_v_j; __pyx_t_15 = __pyx_v_k; if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_L.shape[0]; if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_L.shape[1]; __pyx_t_16 = __pyx_v_i; __pyx_t_17 = __pyx_v_k; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_16 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_17)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_12 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_13)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_14 * __pyx_v_L.strides[0]) ) + __pyx_t_15 * __pyx_v_L.strides[1]) )))); /* "GPy/util/choleskies_cython.pyx":58 * for i in range(j, N): * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] * dL_dK[j, k] -= dL_dK[i, j] * L[i, k] # <<<<<<<<<<<<<< * for j in range(k + 1, N): * dL_dK[j, k] /= L[k, k] */ __pyx_t_18 = __pyx_v_i; __pyx_t_19 = __pyx_v_j; if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_dL_dK.shape[1]; __pyx_t_20 = __pyx_v_i; __pyx_t_21 = __pyx_v_k; if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_L.shape[0]; if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_L.shape[1]; __pyx_t_22 = __pyx_v_j; 
__pyx_t_23 = __pyx_v_k; if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_22 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_23)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_18 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_19)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_20 * __pyx_v_L.strides[0]) ) + __pyx_t_21 * __pyx_v_L.strides[1]) )))); } } /* "GPy/util/choleskies_cython.pyx":59 * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] * dL_dK[j, k] -= dL_dK[i, j] * L[i, k] * for j in range(k + 1, N): # <<<<<<<<<<<<<< * dL_dK[j, k] /= L[k, k] * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] */ __pyx_t_8 = __pyx_v_N; for (__pyx_t_9 = (__pyx_v_k + 1); __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_j = __pyx_t_9; /* "GPy/util/choleskies_cython.pyx":60 * dL_dK[j, k] -= dL_dK[i, j] * L[i, k] * for j in range(k + 1, N): * dL_dK[j, k] /= L[k, k] # <<<<<<<<<<<<<< * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] * dL_dK[k, k] /= (2. * L[k, k]) */ __pyx_t_10 = __pyx_v_k; __pyx_t_11 = __pyx_v_k; if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_v_L.shape[0]; if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_v_L.shape[1]; __pyx_t_24 = __pyx_v_j; __pyx_t_25 = __pyx_v_k; if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_24 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_25)) )) /= (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_10 * __pyx_v_L.strides[0]) ) + __pyx_t_11 * __pyx_v_L.strides[1]) ))); /* "GPy/util/choleskies_cython.pyx":61 * for j in range(k + 1, N): * dL_dK[j, k] /= L[k, k] * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] # <<<<<<<<<<<<<< * dL_dK[k, k] /= (2. 
* L[k, k]) * return dL_dK */ __pyx_t_26 = __pyx_v_j; __pyx_t_27 = __pyx_v_k; if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_v_L.shape[0]; if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_v_L.shape[1]; __pyx_t_28 = __pyx_v_j; __pyx_t_29 = __pyx_v_k; if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_dL_dK.shape[1]; __pyx_t_30 = __pyx_v_k; __pyx_t_31 = __pyx_v_k; if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_30 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_31)) )) -= ((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_26 * __pyx_v_L.strides[0]) ) + __pyx_t_27 * __pyx_v_L.strides[1]) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_28 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_29)) )))); } /* "GPy/util/choleskies_cython.pyx":62 * dL_dK[j, k] /= L[k, k] * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] * dL_dK[k, k] /= (2. * L[k, k]) # <<<<<<<<<<<<<< * return dL_dK * */ __pyx_t_8 = __pyx_v_k; __pyx_t_9 = __pyx_v_k; if (__pyx_t_8 < 0) __pyx_t_8 += __pyx_v_L.shape[0]; if (__pyx_t_9 < 0) __pyx_t_9 += __pyx_v_L.shape[1]; __pyx_t_32 = __pyx_v_k; __pyx_t_33 = __pyx_v_k; if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_32 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_33)) )) /= (2. 
* (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_8 * __pyx_v_L.strides[0]) ) + __pyx_t_9 * __pyx_v_L.strides[1]) )))); } } /* "GPy/util/choleskies_cython.pyx":53 * cdef int N = L.shape[0] * cdef int k, j, i * with nogil: # <<<<<<<<<<<<<< * for k in range(N - 1, -1, -1): * for j in range(k + 1, N): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "GPy/util/choleskies_cython.pyx":63 * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] * dL_dK[k, k] /= (2. * L[k, k]) * return dL_dK # <<<<<<<<<<<<<< * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_dL_dK, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "GPy/util/choleskies_cython.pyx":49 * return flat * * def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_dL_dK, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_dL, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_L, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "GPy/util/choleskies_cython.pyx":65 * return dL_dK * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<< * cdef double[:,::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ /* Python wrapper */ static 
PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_7backprop_gradient_par(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_7backprop_gradient_par = {"backprop_gradient_par", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_7backprop_gradient_par, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_7backprop_gradient_par(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_dL = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_gradient_par (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dL,&__pyx_n_s_L,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dL)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_L)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_gradient_par", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_gradient_par") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto 
__pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_dL = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_dL.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1]); if (unlikely(!__pyx_v_L.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_gradient_par", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_6backprop_gradient_par(__pyx_self, __pyx_v_dL, __pyx_v_L); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_6backprop_gradient_par(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) { __Pyx_memviewslice __pyx_v_dL_dK = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_N; int __pyx_v_k; int __pyx_v_j; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_7; long __pyx_t_8; int __pyx_t_9; long __pyx_t_10; long __pyx_t_11; long __pyx_t_12; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; int __pyx_t_19; int __pyx_t_20; int __pyx_t_21; int __pyx_t_22; 
int __pyx_t_23; int __pyx_t_24; int __pyx_t_25; int __pyx_t_26; int __pyx_t_27; int __pyx_t_28; int __pyx_t_29; int __pyx_t_30; int __pyx_t_31; int __pyx_t_32; int __pyx_t_33; int __pyx_t_34; int __pyx_t_35; int __pyx_t_36; int __pyx_t_37; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("backprop_gradient_par", 0); /* "GPy/util/choleskies_cython.pyx":66 * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): * cdef double[:,::1] dL_dK = np.tril(dL) # <<<<<<<<<<<<<< * cdef int N = L.shape[0] * cdef int k, j, i */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tril); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dL, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_4) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL; PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1); if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_dL_dK = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "GPy/util/choleskies_cython.pyx":67 * def backprop_gradient_par(double[:,:] dL, double[:,:] L): * cdef double[:,::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] # <<<<<<<<<<<<<< * cdef int k, j, i * with nogil: */ __pyx_v_N = (__pyx_v_L.shape[0]); /* "GPy/util/choleskies_cython.pyx":69 * cdef int N = L.shape[0] * cdef int k, j, i * with nogil: # <<<<<<<<<<<<<< * for k in range(N - 1, -1, -1): * with parallel(): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "GPy/util/choleskies_cython.pyx":70 * cdef int k, j, i * with nogil: * for k in range(N - 1, -1, -1): # <<<<<<<<<<<<<< * with parallel(): * for i in prange(k + 1, N): */ for (__pyx_t_7 = (__pyx_v_N - 1); __pyx_t_7 > -1; __pyx_t_7-=1) { __pyx_v_k = __pyx_t_7; /* "GPy/util/choleskies_cython.pyx":71 * with nogil: * for k in range(N - 1, -1, -1): * with parallel(): # <<<<<<<<<<<<<< * for i in prange(k + 1, N): * for j in range(k+1, i+1): */ { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely 
#undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_9, __pyx_t_15, __pyx_t_17, __pyx_t_10, __pyx_t_19, __pyx_t_16, __pyx_t_18, __pyx_t_24, __pyx_t_26, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_20, __pyx_t_22, __pyx_t_21, __pyx_t_11, __pyx_t_23, __pyx_t_25) #endif /* _OPENMP */ { /* "GPy/util/choleskies_cython.pyx":72 * for k in range(N - 1, -1, -1): * with parallel(): * for i in prange(k + 1, N): # <<<<<<<<<<<<<< * for j in range(k+1, i+1): * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] */ __pyx_t_8 = (__pyx_v_k + 1); __pyx_t_9 = __pyx_v_N; if (1 == 0) abort(); { __pyx_t_11 = (__pyx_t_9 - __pyx_t_8) / 1; if (__pyx_t_11 > 0) { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_j) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_11; __pyx_t_10++){ { __pyx_v_i = __pyx_t_8 + 1 * __pyx_t_10; /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); /* "GPy/util/choleskies_cython.pyx":73 * with parallel(): * for i in prange(k + 1, N): * for j in range(k+1, i+1): # <<<<<<<<<<<<<< * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] * for j in range(i, N): */ __pyx_t_12 = (__pyx_v_i + 1); for (__pyx_t_13 = (__pyx_v_k + 1); __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_j = __pyx_t_13; /* "GPy/util/choleskies_cython.pyx":74 * for i in prange(k + 1, N): * for j in range(k+1, i+1): * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] # <<<<<<<<<<<<<< * for j in range(i, N): * dL_dK[i, k] -= dL_dK[j, i] * L[j, k] */ __pyx_t_14 = __pyx_v_i; __pyx_t_15 = __pyx_v_j; if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_dL_dK.shape[1]; __pyx_t_16 = __pyx_v_j; __pyx_t_17 = __pyx_v_k; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_L.shape[0]; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_L.shape[1]; __pyx_t_18 = __pyx_v_i; __pyx_t_19 = __pyx_v_k; if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_dL_dK.shape[0]; if 
(__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_18 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_19)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_14 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_15)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_16 * __pyx_v_L.strides[0]) ) + __pyx_t_17 * __pyx_v_L.strides[1]) )))); } /* "GPy/util/choleskies_cython.pyx":75 * for j in range(k+1, i+1): * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] * for j in range(i, N): # <<<<<<<<<<<<<< * dL_dK[i, k] -= dL_dK[j, i] * L[j, k] * for j in range(k + 1, N): */ __pyx_t_13 = __pyx_v_N; for (__pyx_t_20 = __pyx_v_i; __pyx_t_20 < __pyx_t_13; __pyx_t_20+=1) { __pyx_v_j = __pyx_t_20; /* "GPy/util/choleskies_cython.pyx":76 * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] * for j in range(i, N): * dL_dK[i, k] -= dL_dK[j, i] * L[j, k] # <<<<<<<<<<<<<< * for j in range(k + 1, N): * dL_dK[j, k] /= L[k, k] */ __pyx_t_21 = __pyx_v_j; __pyx_t_22 = __pyx_v_i; if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_dL_dK.shape[1]; __pyx_t_23 = __pyx_v_j; __pyx_t_24 = __pyx_v_k; if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_v_L.shape[0]; if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_L.shape[1]; __pyx_t_25 = __pyx_v_i; __pyx_t_26 = __pyx_v_k; if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_25 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_26)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_21 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_22)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_23 * __pyx_v_L.strides[0]) ) + __pyx_t_24 * __pyx_v_L.strides[1]) )))); } } } } } } } #if 
((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif /* "GPy/util/choleskies_cython.pyx":77 * for j in range(i, N): * dL_dK[i, k] -= dL_dK[j, i] * L[j, k] * for j in range(k + 1, N): # <<<<<<<<<<<<<< * dL_dK[j, k] /= L[k, k] * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] */ __pyx_t_9 = __pyx_v_N; for (__pyx_t_13 = (__pyx_v_k + 1); __pyx_t_13 < __pyx_t_9; __pyx_t_13+=1) { __pyx_v_j = __pyx_t_13; /* "GPy/util/choleskies_cython.pyx":78 * dL_dK[i, k] -= dL_dK[j, i] * L[j, k] * for j in range(k + 1, N): * dL_dK[j, k] /= L[k, k] # <<<<<<<<<<<<<< * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] * dL_dK[k, k] /= (2. * L[k, k]) */ __pyx_t_20 = __pyx_v_k; __pyx_t_27 = __pyx_v_k; if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_L.shape[0]; if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_v_L.shape[1]; __pyx_t_28 = __pyx_v_j; __pyx_t_29 = __pyx_v_k; if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_28 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_29)) )) /= (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_20 * __pyx_v_L.strides[0]) ) + __pyx_t_27 * __pyx_v_L.strides[1]) ))); /* "GPy/util/choleskies_cython.pyx":79 * for j in range(k + 1, N): * dL_dK[j, k] /= L[k, k] * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] # <<<<<<<<<<<<<< * dL_dK[k, k] /= (2. 
* L[k, k]) * return dL_dK */ __pyx_t_30 = __pyx_v_j; __pyx_t_31 = __pyx_v_k; if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_v_L.shape[0]; if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_L.shape[1]; __pyx_t_32 = __pyx_v_j; __pyx_t_33 = __pyx_v_k; if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_v_dL_dK.shape[1]; __pyx_t_34 = __pyx_v_k; __pyx_t_35 = __pyx_v_k; if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_35 < 0) __pyx_t_35 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_34 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_35)) )) -= ((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_30 * __pyx_v_L.strides[0]) ) + __pyx_t_31 * __pyx_v_L.strides[1]) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_32 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_33)) )))); } /* "GPy/util/choleskies_cython.pyx":80 * dL_dK[j, k] /= L[k, k] * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] * dL_dK[k, k] /= (2. * L[k, k]) # <<<<<<<<<<<<<< * return dL_dK * */ __pyx_t_9 = __pyx_v_k; __pyx_t_13 = __pyx_v_k; if (__pyx_t_9 < 0) __pyx_t_9 += __pyx_v_L.shape[0]; if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_v_L.shape[1]; __pyx_t_36 = __pyx_v_k; __pyx_t_37 = __pyx_v_k; if (__pyx_t_36 < 0) __pyx_t_36 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_37 < 0) __pyx_t_37 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_36 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_37)) )) /= (2. 
* (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_9 * __pyx_v_L.strides[0]) ) + __pyx_t_13 * __pyx_v_L.strides[1]) )))); } } /* "GPy/util/choleskies_cython.pyx":69 * cdef int N = L.shape[0] * cdef int k, j, i * with nogil: # <<<<<<<<<<<<<< * for k in range(N - 1, -1, -1): * with parallel(): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "GPy/util/choleskies_cython.pyx":81 * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] * dL_dK[k, k] /= (2. * L[k, k]) * return dL_dK # <<<<<<<<<<<<<< * * cdef void chol_backprop(int N, double[:, ::1] dL, double[:, ::1] L) nogil: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_dL_dK, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "GPy/util/choleskies_cython.pyx":65 * return dL_dK * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<< * cdef double[:,::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_dL_dK, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_dL, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_L, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "GPy/util/choleskies_cython.pyx":83 * return dL_dK * * cdef void chol_backprop(int N, double[:, ::1] dL, double[:, ::1] L) nogil: # <<<<<<<<<<<<<< * cdef int i, k, n * */ static void 
/* NOTE(review): auto-generated Cython C -- do not hand-edit; regenerate from
 * GPy/util/choleskies_cython.pyx (the quoted .pyx fragments below are the real
 * source, lines 83-106). This is the nogil C body of cdef chol_backprop:
 * in-place backprop of Cholesky-factor gradients over C-contiguous 2-D double
 * memoryviews dL and L, delegating the heavy inner products to
 * scipy.linalg.cython_blas dsymv / dscal / ddot. */
__pyx_f_3GPy_4util_17choleskies_cython_chol_backprop(int __pyx_v_N, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) {
int __pyx_v_i; int __pyx_v_k; int __pyx_v_n; double __pyx_v_alpha; double __pyx_v_beta; int __pyx_v_incx; double __pyx_v_scale;
/* Cython temporaries: long = index temps, int/double = scratch values. */
long __pyx_t_1; long __pyx_t_2; long __pyx_t_3; long __pyx_t_4; int __pyx_t_5; long __pyx_t_6; long __pyx_t_7; long __pyx_t_8; int __pyx_t_9; long __pyx_t_10; int __pyx_t_11; long __pyx_t_12; int __pyx_t_13; long __pyx_t_14; long __pyx_t_15; long __pyx_t_16; int __pyx_t_17; long __pyx_t_18; int __pyx_t_19; int __pyx_t_20; double __pyx_t_21; int __pyx_t_22; long __pyx_t_23; int __pyx_t_24; long __pyx_t_25; int __pyx_t_26; int __pyx_t_27; int __pyx_t_28; int __pyx_t_29; int __pyx_t_30; int __pyx_t_31; int __pyx_t_32;
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
/* "GPy/util/choleskies_cython.pyx":87 * * # DSYMV required constant arguments * cdef double alpha=-1, beta=1 # <<<<<<<<<<<<<< * cdef int incx=N * */
__pyx_v_alpha = -1.0;
__pyx_v_beta = 1.0;
/* "GPy/util/choleskies_cython.pyx":88 * # DSYMV required constant arguments * cdef double alpha=-1, beta=1 * cdef int incx=N # <<<<<<<<<<<<<< * * # DSCAL required arguments */
__pyx_v_incx = __pyx_v_N;
/* "GPy/util/choleskies_cython.pyx":93 * cdef double scale * * dL[N - 1, N - 1] /= (2. * L[N - 1, N - 1]) # <<<<<<<<<<<<<< * for k in range(N-2, -1, -1): * n = N-k-1 */
__pyx_t_1 = (__pyx_v_N - 1); __pyx_t_2 = (__pyx_v_N - 1);
if (__pyx_t_1 < 0) __pyx_t_1 += __pyx_v_L.shape[0];
if (__pyx_t_2 < 0) __pyx_t_2 += __pyx_v_L.shape[1];
__pyx_t_3 = (__pyx_v_N - 1); __pyx_t_4 = (__pyx_v_N - 1);
if (__pyx_t_3 < 0) __pyx_t_3 += __pyx_v_dL.shape[0];
if (__pyx_t_4 < 0) __pyx_t_4 += __pyx_v_dL.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_3 * __pyx_v_dL.strides[0]) )) + __pyx_t_4)) )) /= (2. * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_1 * __pyx_v_L.strides[0]) )) + __pyx_t_2)) ))));
/* "GPy/util/choleskies_cython.pyx":94 * * dL[N - 1, N - 1] /= (2. * L[N - 1, N - 1]) * for k in range(N-2, -1, -1): # <<<<<<<<<<<<<< * n = N-k-1 * cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, */
for (__pyx_t_5 = (__pyx_v_N - 2); __pyx_t_5 > -1; __pyx_t_5-=1) {
__pyx_v_k = __pyx_t_5;
/* "GPy/util/choleskies_cython.pyx":95 * dL[N - 1, N - 1] /= (2. * L[N - 1, N - 1]) * for k in range(N-2, -1, -1): * n = N-k-1 # <<<<<<<<<<<<<< * cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, * beta=&beta, y=&dL[k + 1, k], incy=&N) */
__pyx_v_n = ((__pyx_v_N - __pyx_v_k) - 1);
/* "GPy/util/choleskies_cython.pyx":96 * for k in range(N-2, -1, -1): * n = N-k-1 * cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, # <<<<<<<<<<<<<< * beta=&beta, y=&dL[k + 1, k], incy=&N) * */
__pyx_t_6 = (__pyx_v_k + 1); __pyx_t_7 = (__pyx_v_k + 1);
if (__pyx_t_6 < 0) __pyx_t_6 += __pyx_v_dL.shape[0];
if (__pyx_t_7 < 0) __pyx_t_7 += __pyx_v_dL.shape[1];
__pyx_t_8 = (__pyx_v_k + 1); __pyx_t_9 = __pyx_v_k;
if (__pyx_t_8 < 0) __pyx_t_8 += __pyx_v_L.shape[0];
if (__pyx_t_9 < 0) __pyx_t_9 += __pyx_v_L.shape[1];
/* "GPy/util/choleskies_cython.pyx":97 * n = N-k-1 * cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, * beta=&beta, y=&dL[k + 1, k], incy=&N) # <<<<<<<<<<<<<< * * for i in xrange(0, N - k - 1): */
__pyx_t_10 = (__pyx_v_k + 1); __pyx_t_11 = __pyx_v_k;
if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_v_dL.shape[0];
if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_v_dL.shape[1];
/* "GPy/util/choleskies_cython.pyx":96 * for k in range(N-2, -1, -1): * n = N-k-1 * cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, # <<<<<<<<<<<<<< * beta=&beta, y=&dL[k + 1, k], incy=&N) * */
__pyx_f_5scipy_6linalg_11cython_blas_dsymv(__pyx_k_u, (&__pyx_v_n), (&__pyx_v_alpha), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_6 * __pyx_v_dL.strides[0]) )) + __pyx_t_7)) )))), (&__pyx_v_N), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_8 * __pyx_v_L.strides[0]) )) + __pyx_t_9)) )))), (&__pyx_v_incx), (&__pyx_v_beta), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_10 * __pyx_v_dL.strides[0]) )) + __pyx_t_11)) )))), (&__pyx_v_N));
/* "GPy/util/choleskies_cython.pyx":99 * beta=&beta, y=&dL[k + 1, k], incy=&N) * * for i in xrange(0, N - k - 1): # <<<<<<<<<<<<<< * dL[k + 1 + i, k] -= dL[k + i+ 1, k + i + 1] * L[k + 1 + i, k] * */
__pyx_t_12 = ((__pyx_v_N - __pyx_v_k) - 1);
for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
__pyx_v_i = __pyx_t_13;
/* "GPy/util/choleskies_cython.pyx":100 * * for i in xrange(0, N - k - 1): * dL[k + 1 + i, k] -= dL[k + i+ 1, k + i + 1] * L[k + 1 + i, k] # <<<<<<<<<<<<<< * * scale = 1.0 / L[k, k] */
__pyx_t_14 = ((__pyx_v_k + __pyx_v_i) + 1); __pyx_t_15 = ((__pyx_v_k + __pyx_v_i) + 1);
if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_dL.shape[0];
if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_dL.shape[1];
__pyx_t_16 = ((__pyx_v_k + 1) + __pyx_v_i); __pyx_t_17 = __pyx_v_k;
if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_L.shape[0];
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_L.shape[1];
__pyx_t_18 = ((__pyx_v_k + 1) + __pyx_v_i); __pyx_t_19 = __pyx_v_k;
if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_dL.shape[0];
if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_dL.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_18 * __pyx_v_dL.strides[0]) )) + __pyx_t_19)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_14 * __pyx_v_dL.strides[0]) )) + __pyx_t_15)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_16 * __pyx_v_L.strides[0]) )) + __pyx_t_17)) ))));
}
/* "GPy/util/choleskies_cython.pyx":102 * dL[k + 1 + i, k] -= dL[k + i+ 1, k + i + 1] * L[k + 1 + i, k] * * scale = 1.0 / L[k, k] # <<<<<<<<<<<<<< * cblas.dscal(&n, &scale , &dL[k + 1, k], &N) * # */
__pyx_t_13 = __pyx_v_k; __pyx_t_20 = __pyx_v_k;
if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_v_L.shape[0];
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_L.shape[1];
__pyx_t_21 = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_13 * __pyx_v_L.strides[0]) )) + __pyx_t_20)) )));
/* NOTE(review): zero pivot L[k,k] re-acquires the GIL to raise ZeroDivisionError,
 * then jumps to the unraisable-error exit of this nogil function. */
if (unlikely(__pyx_t_21 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_scale = (1.0 / __pyx_t_21);
/* "GPy/util/choleskies_cython.pyx":103 * * scale = 1.0 / L[k, k] * cblas.dscal(&n, &scale , &dL[k + 1, k], &N) # <<<<<<<<<<<<<< * # * dL[k, k] -= cblas.ddot(&n, &dL[k + 1, k], &N, &L[k+1, k], &incx) */
__pyx_t_12 = (__pyx_v_k + 1); __pyx_t_22 = __pyx_v_k;
if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_v_dL.shape[0];
if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_dL.shape[1];
__pyx_f_5scipy_6linalg_11cython_blas_dscal((&__pyx_v_n), (&__pyx_v_scale), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_12 * __pyx_v_dL.strides[0]) )) + __pyx_t_22)) )))), (&__pyx_v_N));
/* "GPy/util/choleskies_cython.pyx":105 * cblas.dscal(&n, &scale , &dL[k + 1, k], &N) * # * dL[k, k] -= cblas.ddot(&n, &dL[k + 1, k], &N, &L[k+1, k], &incx) # <<<<<<<<<<<<<< * dL[k, k] /= (2.0 * L[k, k]) * */
__pyx_t_23 = (__pyx_v_k + 1); __pyx_t_24 = __pyx_v_k;
if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_v_dL.shape[0];
if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_dL.shape[1];
/* Diagonal correction: dL[k,k] -= ddot(dL[k+1:,k], L[k+1:,k]) (pyx line 105). */
__pyx_t_25 = (__pyx_v_k + 1); __pyx_t_26 = __pyx_v_k;
if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_L.shape[0];
if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_v_L.shape[1];
__pyx_t_27 = __pyx_v_k; __pyx_t_28 = __pyx_v_k;
if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_v_dL.shape[0];
if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_v_dL.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_27 * __pyx_v_dL.strides[0]) )) + __pyx_t_28)) )) -= __pyx_f_5scipy_6linalg_11cython_blas_ddot((&__pyx_v_n), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_23 * __pyx_v_dL.strides[0]) )) + __pyx_t_24)) )))), (&__pyx_v_N), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_25 * __pyx_v_L.strides[0]) )) + __pyx_t_26)) )))), (&__pyx_v_incx));
/* "GPy/util/choleskies_cython.pyx":106 * # * dL[k, k] -= cblas.ddot(&n, &dL[k + 1, k], &N, &L[k+1, k], &incx) * dL[k, k] /= (2.0 * L[k, k]) # <<<<<<<<<<<<<< * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): */
__pyx_t_29 = __pyx_v_k; __pyx_t_30 = __pyx_v_k;
if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_L.shape[0];
if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_v_L.shape[1];
__pyx_t_31 = __pyx_v_k; __pyx_t_32 = __pyx_v_k;
if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_dL.shape[0];
if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_v_dL.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_31 * __pyx_v_dL.strides[0]) )) + __pyx_t_32)) )) /= (2.0 * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_29 * __pyx_v_L.strides[0]) )) + __pyx_t_30)) ))));
}
/* "GPy/util/choleskies_cython.pyx":83 * return dL_dK * * cdef void chol_backprop(int N, double[:, ::1] dL, double[:, ::1] L) nogil: # <<<<<<<<<<<<<< * cdef int i, k, n * */
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_WriteUnraisable("GPy.util.choleskies_cython.chol_backprop", __pyx_clineno,
__pyx_lineno, __pyx_filename, 0);
__pyx_L0:;
}
/* NOTE(review): auto-generated Cython C -- do not hand-edit; regenerate from
 * GPy/util/choleskies_cython.pyx. Below: the CPython-callable wrapper for
 * backprop_gradient_par_c -- parses the (dL, L) positional/keyword arguments
 * into 2-D double memoryview slices, then dispatches to the
 * __pyx_pf_..._8backprop_gradient_par_c implementation. */
/* "GPy/util/choleskies_cython.pyx":108 * dL[k, k] /= (2.0 * L[k, k]) * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) */
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c = {"backprop_gradient_par_c", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_dL = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("backprop_gradient_par_c (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dL,&__pyx_n_s_L,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; }
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dL)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_L)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_gradient_par_c", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } }
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_gradient_par_c") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_dL = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_dL.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1]); if (unlikely(!__pyx_v_L.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("backprop_gradient_par_c", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_8backprop_gradient_par_c(__pyx_self, __pyx_v_dL, __pyx_v_L);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation body: tril-copies dL, makes L contiguous, runs chol_backprop
 * without the GIL, and returns the result as a new object (.pyx lines 108-114). */
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_8backprop_gradient_par_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) {
__Pyx_memviewslice __pyx_v_dL_dK = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_L_cont = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_N;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject
/* (continuation of the auto-generated backprop_gradient_par_c implementation:
 * remaining temporaries, then the translated .pyx statements quoted below.
 * Do not hand-edit -- regenerate from GPy/util/choleskies_cython.pyx.) */
*__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("backprop_gradient_par_c", 0);
/* "GPy/util/choleskies_cython.pyx":109 * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig # <<<<<<<<<<<<<< * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) * cdef int N = L.shape[0] */
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tril); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dL, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_4) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL;
PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1);
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_dL_dK = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "GPy/util/choleskies_cython.pyx":110 * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) # <<<<<<<<<<<<<< * cdef int N = L.shape[0] * with nogil: */
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_ascontiguousarray); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_L, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
if (!__pyx_t_2) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = NULL;
PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1);
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_L_cont = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "GPy/util/choleskies_cython.pyx":111 * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) * cdef int N = L.shape[0] # <<<<<<<<<<<<<< * with nogil: * chol_backprop(N, dL_dK, L_cont) */
__pyx_v_N = (__pyx_v_L.shape[0]);
/* "GPy/util/choleskies_cython.pyx":112 * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) * cdef int N = L.shape[0] * with nogil: # <<<<<<<<<<<<<< * chol_backprop(N, dL_dK, L_cont) * return np.asarray(dL_dK) */
/* Release the GIL for the pure-C BLAS backprop; re-acquired before building the
 * Python return value below. */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
/* "GPy/util/choleskies_cython.pyx":113 * cdef int N = L.shape[0] * with nogil: * chol_backprop(N, dL_dK, L_cont) # <<<<<<<<<<<<<< * return np.asarray(dL_dK) */
__pyx_f_3GPy_4util_17choleskies_cython_chol_backprop(__pyx_v_N, __pyx_v_dL_dK, __pyx_v_L_cont);
}
/* "GPy/util/choleskies_cython.pyx":112 * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) * cdef int N = L.shape[0] * with nogil: # <<<<<<<<<<<<<< * chol_backprop(N, dL_dK, L_cont) * return np.asarray(dL_dK) */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "GPy/util/choleskies_cython.pyx":114 * with nogil: * chol_backprop(N, dL_dK, L_cont) * return np.asarray(dL_dK) # <<<<<<<<<<<<<< */
__Pyx_XDECREF(__pyx_r);
__pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_asarray); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __pyx_memoryview_fromslice(__pyx_v_dL_dK, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
if (!__pyx_t_3) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = NULL;
PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "GPy/util/choleskies_cython.pyx":108 * dL[k, k] /= (2.0 * L[k, k]) * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_dL_dK, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_L_cont, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_dL, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_L, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 
1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = 
__pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; if (__pyx_t_1) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise 
ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; if (__pyx_t_1) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229 * # Allocate new buffer for strides and shape info. 
* # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230 * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L11; } /*else*/ { /* 
"../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* 
"../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":247 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":249 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L14; } /*else*/ { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* 
"../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; if (__pyx_t_1) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte 
order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ switch (__pyx_v_t) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ case NPY_BYTE: __pyx_v_f = __pyx_k_b; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = __pyx_k_B; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = __pyx_k_h; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == 
NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = __pyx_k_H; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = __pyx_k_i; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = __pyx_k_I; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = __pyx_k_l; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = __pyx_k_L; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = __pyx_k_q; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case 
NPY_ULONGLONG: __pyx_v_f = __pyx_k_Q; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = __pyx_k_f; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = __pyx_k_d; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = __pyx_k_g; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = __pyx_k_Zf; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = __pyx_k_Zd; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = 
__pyx_k_Zg; break; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = __pyx_k_O; break; default: /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} break; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: 
*/ __pyx_v_info->format = __pyx_v_f; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; } /*else*/ { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_7; /* 
"../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); 
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":295 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< 
* if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { /* NOTE(review): Cython-generated wrapper (from numpy/__init__.pxd) — do not edit by hand; regenerate from the .pyx/.pxd source. Returns the result of PyArray_MultiIterNew(2, a, b) as a new reference; on failure returns 0 with the Python error state set (see the __pyx_L1_error path below). */ PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* NOTE(review): same generated pattern for three operands. */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* NOTE(review): delegates to PyArray_MultiIterNew(3, ...); the new reference is handed to the caller via __pyx_r. */ /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781 * *
cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":784 * * cdef inline object PyArray_MultiIterNew5(a, b, 
c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 784; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":793 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = 
PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) 
__Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = 
__Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":804 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":805 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = 
((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; if (__pyx_t_6) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* 
"../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":817 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":819 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":824 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":825 * if not 
PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * # 
Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 104; goto __pyx_L15; } /* 
"../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 
105; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(__pyx_t_6) { (__pyx_v_f[0]) = 108; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 113; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 102; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 100; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 103; goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":843 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise 
ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /*else*/ { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":847 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L15:; /* 
"../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":848 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /*else*/ { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":852 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":853 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":975 * else: * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":981 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto 
__pyx_L0; } /*else*/ { /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":983 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":116 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = 
PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { 
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "View.MemoryView":117 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":116 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, 
Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; char *__pyx_t_5; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":123 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":124 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":126 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":127 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":129 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":130 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if isinstance(format, unicode): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":132 * raise ValueError("itemsize <= 0 for cython.array") * * if isinstance(format, unicode): # <<<<<<<<<<<<<< * format = (<unicode>format).encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyUnicode_Check(__pyx_v_format); __pyx_t_4 = (__pyx_t_2 != 0); if (__pyx_t_4) { /* "View.MemoryView":133 * * if isinstance(format, unicode): * format = (<unicode>format).encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ if (unlikely(__pyx_v_format == Py_None)) { PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%s'", "encode"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_3 = PyUnicode_AsASCIIString(((PyObject*)__pyx_v_format)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; 
goto __pyx_L5; } __pyx_L5:; /* "View.MemoryView":134 * if isinstance(format, unicode): * format = (<unicode>format).encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":135 * format = (<unicode>format).encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->format = __pyx_t_5; /* "View.MemoryView":138 * * * self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyMem_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":139 * * self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":141 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":142 * * if not self._shape: * 
raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":145 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_6 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_7); __pyx_t_1++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_7 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_7); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":146 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":147 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); __pyx_t_7 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":148 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":145 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":151 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":152 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":153 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; goto __pyx_L10; } /* "View.MemoryView":154 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":155 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":156 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); 
__Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; goto __pyx_L10; } /*else*/ { /* "View.MemoryView":158 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L10:; /* "View.MemoryView":160 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":163 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":164 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if 
allocate_buffer: * */ __pyx_t_3 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":165 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":168 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":169 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":170 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":172 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = 
(__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":173 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":174 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":175 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":176 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } goto __pyx_L13; } __pyx_L13:; goto __pyx_L11; } __pyx_L11:; /* "View.MemoryView":116 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # 
<<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */
  /* function exit code for array.__cinit__ (body begins earlier in this file) */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_format);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":179  def __getbuffer__(self, Py_buffer *info, int flags): */

/* Buffer-protocol entry point for the Cython `array` type (cname "getbuffer").
   Thin Python wrapper: forwards to the implementation function below. */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of array.__getbuffer__: validates the requested contiguity
   against self.mode ("c" or "fortran") and fills the Py_buffer fields from
   the array's own shape/strides/data.  Raises ValueError when the caller
   asks for a layout this array cannot provide. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_v_bufmode;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  char *__pyx_t_4;
  Py_ssize_t __pyx_t_5;
  int __pyx_t_6;
  Py_ssize_t *__pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getbuffer__", 0);
  /* Buffer-protocol convention: info->obj starts as an owned None and is
     replaced with self on success. */
  if (__pyx_v_info != NULL) {
    __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(__pyx_v_info->obj);
  }

  /* "View.MemoryView":180  cdef int bufmode = -1 */
  __pyx_v_bufmode = -1;

  /* "View.MemoryView":181  if self.mode == u"c": */
  __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":182  bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */
    __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
    goto __pyx_L3;
  }

  /* "View.MemoryView":183  elif self.mode == u"fortran": */
  __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_1 = (__pyx_t_2 != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":184  bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */
    __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
    goto __pyx_L3;
  }
  __pyx_L3:;

  /* "View.MemoryView":185  if not (flags & bufmode): */
  __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":186  raise ValueError("Can only create a buffer that is contiguous in memory.") */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    {__pyx_filename = __pyx_f[2]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }

  /* "View.MemoryView":187-194  copy buf/len/ndim/shape/strides/itemsize into info */
  __pyx_t_4 = __pyx_v_self->data;
  __pyx_v_info->buf = __pyx_t_4;
  __pyx_t_5 = __pyx_v_self->len;
  __pyx_v_info->len = __pyx_t_5;
  __pyx_t_6 = __pyx_v_self->ndim;
  __pyx_v_info->ndim = __pyx_t_6;
  __pyx_t_7 = __pyx_v_self->_shape;
  __pyx_v_info->shape = __pyx_t_7;
  __pyx_t_7 = __pyx_v_self->_strides;
  __pyx_v_info->strides = __pyx_t_7;
  /* suboffsets NULL: this array is always direct (no indirect dimensions) */
  __pyx_v_info->suboffsets = NULL;
  __pyx_t_5 = __pyx_v_self->itemsize;
  __pyx_v_info->itemsize = __pyx_t_5;
  __pyx_v_info->readonly = 0;

  /* "View.MemoryView":196  if flags & PyBUF_FORMAT: */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":197  info.format = self.format */
    __pyx_t_4 = __pyx_v_self->format;
    __pyx_v_info->format = __pyx_t_4;
    goto __pyx_L5;
  }
  /*else*/ {

    /* "View.MemoryView":199  info.format = NULL */
    __pyx_v_info->format = NULL;
  }
  __pyx_L5:;

  /* "View.MemoryView":201  info.obj = self  (replaces the initial None) */
  __Pyx_INCREF(((PyObject *)__pyx_v_self));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
  __Pyx_GOTREF(__pyx_v_info->obj);
  __Pyx_DECREF(__pyx_v_info->obj);
  __pyx_v_info->obj = ((PyObject *)__pyx_v_self);

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  /* On error, release the None placeholder held in info->obj. */
  if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
    __Pyx_GOTREF(__pyx_v_info->obj);
    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
  }
  goto __pyx_L2;
  __pyx_L0:;
  /* On success, drop the placeholder reference without clearing the field
     set above (it now holds self). */
  if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
    __Pyx_GOTREF(Py_None);
    __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
  }
  __pyx_L2:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":205  def __dealloc__(array self): */

/* tp_dealloc wrapper for the `array` type; forwards to the impl below. */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of array.__dealloc__: prefers a user-supplied
   callback_free_data, otherwise frees owned data (decref'ing stored
   PyObject* elements first when dtype is object). */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("__dealloc__", 0);

  /* "View.MemoryView":206  if self.callback_free_data != NULL: */
  __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":207  self.callback_free_data(self.data) */
    __pyx_v_self->callback_free_data(__pyx_v_self->data);
    goto __pyx_L3;
  }

  /* "View.MemoryView":208  elif self.free_data: */
  __pyx_t_1 =
(__pyx_v_self->free_data != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":209  if self.dtype_is_object: */
    __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
    if (__pyx_t_1) {

      /* "View.MemoryView":210-211  decref every stored object before freeing
         the backing buffer (inc_new_values=False). */
      __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
      goto __pyx_L4;
    }
    __pyx_L4:;

    /* "View.MemoryView":212  free(self.data) */
    free(__pyx_v_self->data);
    goto __pyx_L3;
  }
  __pyx_L3:;

  /* "View.MemoryView":213  PyMem_Free(self._shape)
     _shape/_strides share one PyMem allocation, freed via _shape. */
  PyMem_Free(__pyx_v_self->_shape);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* "View.MemoryView":217  property memview: def __get__(self): */

/* Getter for array.memview (cname "get_memview"); forwards to the impl. */
static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/
static PyObject *get_memview(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return
__pyx_r;
}

/* Implementation of the memview property: wraps this array in a new
   memoryview object requesting any-contiguous, writable, formatted access. */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
  int __pyx_v_flags;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "View.MemoryView":219  flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE */
  __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);

  /* "View.MemoryView":220  return memoryview(self, flags, self.dtype_is_object) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_INCREF(((PyObject *)__pyx_v_self));
  PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":223  def __getattr__(self, attr): */

/* tp_getattro fallback wrapper for `array`; forwards to the impl. */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of array.__getattr__: delegates unknown attributes to the
   memview wrapper, i.e. `return getattr(self.memview, attr)`. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getattr__", 0);

  /* "View.MemoryView":224  return getattr(self.memview, attr) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":226  def __getitem__(self, item): */

/* mp_subscript wrapper for `array`; forwards to the impl. */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of array.__getitem__: `return self.memview[item]`. */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);

  /* "View.MemoryView":227  return self.memview[item] */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":229  def __setitem__(self, item, value): */

/* mp_ass_subscript wrapper for `array`; forwards to the impl. */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of array.__setitem__: `self.memview[item] = value`. */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename
= NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);

  /* "View.MemoryView":230  self.memview[item] = value */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":234  cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize,
                                                      char *format, char *mode, char *buf) */

/* C-level factory (cname "__pyx_array_new"): builds an `array` object.
   When buf == NULL the array allocates its own data; otherwise the array is
   created with allocate_buffer=False and adopts `buf` without copying.
   Returns a new reference, or 0 with an exception set on failure. */
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
  struct __pyx_array_obj *__pyx_v_result = 0;
  struct __pyx_array_obj *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("array_cwrapper", 0);

  /* "View.MemoryView":238  if buf == NULL: */
  __pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
  if
(__pyx_t_1) {

    /* "View.MemoryView":239  result = array(shape, itemsize, format, mode.decode('ASCII')) */
    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_INCREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
    __Pyx_GIVEREF(__pyx_t_2);
    PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_3);
    PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    __pyx_t_2 = 0;
    __pyx_t_3 = 0;
    __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
    __pyx_t_4 = 0;
    goto __pyx_L3;
  }
  /*else*/ {

    /* "View.MemoryView":241-243
       result = array(shape, itemsize, format, mode.decode('ASCII'),
                      allocate_buffer=False); result.data = buf */
    __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_INCREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_5);
    PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_3);
    __pyx_t_4 = 0;
    __pyx_t_5 = 0;
    __pyx_t_3 = 0;
    __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_5 = __Pyx_PyObject_Call(((PyObject
*)((PyObject *)__pyx_array_type)), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
    __pyx_t_5 = 0;

    /* "View.MemoryView":243  result.data = buf  (array does not own `buf`) */
    __pyx_v_result->data = __pyx_v_buf;
  }
  __pyx_L3:;

  /* "View.MemoryView":245  return result */
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_result));
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":271  cdef class Enum(object): def __init__(self, name): */

/* tp_init wrapper for the internal Enum type: unpacks the single required
   `name` argument (positional or keyword) and forwards to the impl. */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_name = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  {
    static
PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
    PyObject* values[1] = {0};
    /* Standard Cython argument unpacking: accepts `name` positionally or as
       a keyword; anything else raises TypeError via the helpers below. */
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case 0:
          if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
          else goto __pyx_L5_argtuple_error;
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
    }
    __pyx_v_name = values[0];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Enum.__init__: `self.name = name`. */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "View.MemoryView":272  self.name = name  (replace any previous value) */
  __Pyx_INCREF(__pyx_v_name);
  __Pyx_GIVEREF(__pyx_v_name);
  __Pyx_GOTREF(__pyx_v_self->name);
  __Pyx_DECREF(__pyx_v_self->name);
  __pyx_v_self->name = __pyx_v_name;

  /* function exit code */
  __pyx_r = 0;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":273  def __repr__(self): return self.name */

/* tp_repr wrapper for Enum; forwards to the impl. */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of Enum.__repr__: returns the stored name object. */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "View.MemoryView":274  return self.name */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->name);
  __pyx_r = __pyx_v_self->name;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":288  cdef void *align_pointer(void *memory, size_t alignment) nogil:
   (signature continues on the next source line; body follows) */
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t
__pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":290 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":294 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":296 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":297 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":299 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview') */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":288 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":317 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int 
__pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;} if (values[2]) 
{ __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":318 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":319 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * 
if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":320 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)((PyObject *)__pyx_memoryview_type))); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":321 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 321; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":322 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":323 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":324 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * self.lock = PyThread_allocate_lock() */ Py_INCREF(Py_None); goto __pyx_L6; } __pyx_L6:; goto 
__pyx_L3; } __pyx_L3:; /* "View.MemoryView":326 * Py_INCREF(Py_None) * * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock == NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":327 * * self.lock = PyThread_allocate_lock() * if self.lock == NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":328 * self.lock = PyThread_allocate_lock() * if self.lock == NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":330 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = self.view.format == b'O' * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":331 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = self.view.format == b'O' # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_self->dtype_is_object = __pyx_t_1; goto __pyx_L8; } /*else*/ { /* "View.MemoryView":333 * self.dtype_is_object = self.view.format == b'O' * else: * self.dtype_is_object = dtype_is_object # 
<<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L8:; /* "View.MemoryView":335 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":337 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":317 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":339 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":340 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":341 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * if self.lock != NULL: */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":343 * __Pyx_ReleaseBuffer(&self.view) * * if self.lock != NULL: # <<<<<<<<<<<<<< * PyThread_free_lock(self.lock) * */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":344 * * if self.lock != NULL: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); goto __pyx_L4; } __pyx_L4:; /* "View.MemoryView":339 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":346 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char 
*__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":348 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":350 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 
350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":351 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":350 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":353 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":346 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit 
code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":356 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":357 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":358 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ 
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; } /* "View.MemoryView":360 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":363 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; 
__pyx_lineno = 363; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_2) { /* "View.MemoryView":364 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /*else*/ { /* "View.MemoryView":366 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":367 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":356 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
__Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":369 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":370 * * def __setitem__(memoryview self, object index, object value): * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (likely(__pyx_t_1 != Py_None)) { PyObject* sequence = __pyx_t_1; #if 
CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_have_slices = __pyx_t_2; __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":372 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":373 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); 
__pyx_v_obj = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":374 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":375 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L4; } /*else*/ { /* "View.MemoryView":377 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj 
*)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":379 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L3:; /* "View.MemoryView":369 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":381 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int 
__pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":382 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, ((PyObject *)__pyx_memoryview_type)); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":384 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":385 * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":384 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_8); 
__Pyx_INCREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L11_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":386 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":387 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, 
__pyx_t_4, __pyx_t_5); goto __pyx_L1_error;
/* NOTE(review): Cython-generated C -- do not hand-edit; regenerate from the .pyx. This range finishes is_slice's exception epilogue, then defines memoryview.setitem_slice_assignment(dst, src), which copies src's contents into dst via memoryview_copy_contents after extracting a __Pyx_memviewslice from each side. */
__pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L11_try_end:; } goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":389 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":381 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":391 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":395 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, 
__pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* (review) Type-checks both operands (memoryview or None), reads src.ndim / dst.ndim as Python attributes, then performs the element copy. */
/* "View.MemoryView":396 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":397 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":395 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = 
__pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":391 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":399 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[128]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":401 * cdef 
setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL;
/* NOTE(review): Cython-generated C -- do not hand-edit; regenerate from the .pyx. Body of memoryview.setitem_slice_assign_scalar(dst, value): packs the scalar into a stack buffer (128 ints) or a PyMem_Malloc'd buffer when itemsize is larger, then broadcasts it over dst with slice_assign_scalar; the buffer is released in a try/finally (PyMem_Free) that preserves any in-flight exception. */
/* "View.MemoryView":406 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":408 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":409 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":410 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":411 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":412 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":414 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":416 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> 
item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":417 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":418 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); goto __pyx_L8; } /*else*/ { /* "View.MemoryView":420 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L6_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":424 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":425 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 425; __pyx_clineno = __LINE__; goto __pyx_L6_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L9; } __pyx_L9:; /* "View.MemoryView":426 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, 
self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":429 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } /*exception exit:*/{ __pyx_L6_error:; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":399 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* (review) Next: memoryview.setitem_indexed(index, value) -- resolves the element pointer for the index, then delegates to assign_item_from_object. */
/* "View.MemoryView":431 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":432 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 432; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":433 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 433; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":431 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # 
<<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): Cython-generated C -- do not hand-edit; regenerate from the .pyx. Below: memoryview.convert_item_to_object(itemp), the slow fallback used when Cython cannot type the item: reads itemsize bytes at itemp and unpacks them with the struct module using self.view.format; struct.error is converted to ValueError("Unable to convert item to object"); a single-character format returns the lone unpacked value instead of the tuple. */
/* "View.MemoryView":435 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":438 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 438; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":441 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = 
struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":442 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":443 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = NULL; } PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, 
__pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; } /*else:*/ { /* "View.MemoryView":447 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":448 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 448; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}; __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; } /* "View.MemoryView":449 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":444 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __pyx_t_1 = 
__Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_12 = PyErr_ExceptionMatches(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_12) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_9); /* "View.MemoryView":445 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} } goto __pyx_L5_except_error; __pyx_L5_except_error:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":435 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* (review) Next: memoryview.assign_item_from_object(itemp, value) -- the inverse fallback: struct.pack()s value (splatting tuples) and copies the resulting bytes into the buffer at itemp. */
/* "View.MemoryView":451 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; Py_ssize_t __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; char *__pyx_t_10; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":454 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":459 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = 
struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value);
/* NOTE(review): Cython-generated C -- do not hand-edit; regenerate from the .pyx. Body of memoryview.assign_item_from_object: tuple values are splatted into struct.pack(format, *value); non-tuples are packed as a single argument; the packed bytes are then copied byte-by-byte into itemp. */
__pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":460 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; 
__pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":462 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = NULL; } PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == 
Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":464 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_7 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_9 = __pyx_v_bytesvalue; __pyx_t_11 = PyBytes_AS_STRING(__pyx_t_9); __pyx_t_12 = (__pyx_t_11 + PyBytes_GET_SIZE(__pyx_t_9)); for (__pyx_t_13 = __pyx_t_11; __pyx_t_13 < __pyx_t_12; __pyx_t_13++) { __pyx_t_10 = __pyx_t_13; __pyx_v_c = (__pyx_t_10[0]); /* "View.MemoryView":465 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_7; /* "View.MemoryView":464 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":465 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":451 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); 
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* (review) Next: the buffer-protocol getbuffer entry point (wrapper plus implementation) that fills a Py_buffer from self.view according to the requested PyBUF_* flags. */
/* "View.MemoryView":468 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; char *__pyx_t_3; void *__pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":469 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":470 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< 
* else: * info.shape = NULL */ __pyx_t_2 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_2; goto __pyx_L3; }
/* NOTE(review): Cython-generated C -- do not hand-edit; regenerate from the .pyx. Interior of __getbuffer__: copies shape/strides/suboffsets/format pointers from self.view into the caller's Py_buffer only when the corresponding PyBUF_* flag was requested (NULL otherwise), then fills buf/ndim/itemsize/len, marks the buffer writable (readonly = 0), and sets info->obj to self to keep the exporter alive. The function's tail lies beyond this chunk. */
/*else*/ { /* "View.MemoryView":472 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ __pyx_v_info->shape = NULL; } __pyx_L3:; /* "View.MemoryView":474 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":475 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_2 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_2; goto __pyx_L4; } /*else*/ { /* "View.MemoryView":477 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ __pyx_v_info->strides = NULL; } __pyx_L4:; /* "View.MemoryView":479 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":480 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_2 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_2; goto __pyx_L5; } /*else*/ { /* "View.MemoryView":482 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->suboffsets = NULL; } __pyx_L5:; /* "View.MemoryView":484 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":485 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_3 = __pyx_v_self->view.format; 
__pyx_v_info->format = __pyx_t_3; goto __pyx_L6; } /*else*/ { /* "View.MemoryView":487 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ __pyx_v_info->format = NULL; } __pyx_L6:; /* "View.MemoryView":489 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_4 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":490 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_5 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_5; /* "View.MemoryView":491 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = 0 */ __pyx_t_6 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_6; /* "View.MemoryView":492 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = 0 * info.obj = self */ __pyx_t_6 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_6; /* "View.MemoryView":493 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = 0 # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_v_info->readonly = 0; /* "View.MemoryView":494 * info.len = self.view.len * info.readonly = 0 * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":468 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* function exit code 
*/ __pyx_r = 0; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * property T: * @cname('__pyx_memoryview_transpose') * def __get__(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":502 * @cname('__pyx_memoryview_transpose') * def __get__(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":503 * def 
* NOTE(review): Cython-generated continuation.  This chunk finishes the `T`
* (transpose) property getter -- copies the memoryview, transposes the copied
* slice in place via __pyx_memslice_transpose, and returns it -- then
* implements the `base` property getter (returns self.obj, the exporting
* object) and the `shape` property getter (a tuple built from
* view.shape[:view.ndim]); the `strides` getter wrapper begins at the end.
* Do not hand-edit: regenerate from the Cython sources.
__get__(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":504 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * property base: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":501 * property T: * @cname('__pyx_memoryview_transpose') * def __get__(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":508 * property base: * @cname('__pyx_memoryview__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":509 * @cname('__pyx_memoryview__get__base') * def __get__(self): * return self.obj # <<<<<<<<<<<<<< * * property shape: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":508 * property base: * @cname('__pyx_memoryview__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":513 * property shape: * @cname('__pyx_memoryview_get_shape') * def __get__(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":514 * @cname('__pyx_memoryview_get_shape') * def __get__(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * property strides: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":513 * property shape: * @cname('__pyx_memoryview_get_shape') * def __get__(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * property strides: * @cname('__pyx_memoryview_get_strides') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit
* NOTE(review): Cython-generated continuation.  This chunk finishes the
* `strides` property getter (raises ValueError "Buffer view does not expose
* strides" when view.strides is NULL, otherwise returns a tuple of
* view.strides[:view.ndim]), implements the `suboffsets` property getter
* (returns (-1,) * view.ndim when view.suboffsets is NULL, otherwise a tuple
* of view.suboffsets[:view.ndim]), and begins the `ndim` property getter.
* Do not hand-edit: regenerate from the Cython sources.
code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":519 * @cname('__pyx_memoryview_get_strides') * def __get__(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":521 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":523 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * property suboffsets: */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":518 * property strides: * @cname('__pyx_memoryview_get_strides') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":527 * property suboffsets: * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj
*__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":528 * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":529 * def __get__(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__14, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":531 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * property ndim: */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 =
PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":527 * property suboffsets: * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":535 * property ndim: * @cname('__pyx_memoryview_get_ndim') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 =
/* NOTE(review): Cython-generated continuation (the split below lands
 * mid-expression, `__pyx_t_1 = NULL`, so this comment sits between tokens).
 * This chunk finishes the `ndim` property getter (returns view.ndim as a
 * Python int), then implements `itemsize` (view.itemsize), `nbytes`
 * (self.size * view.itemsize), and `size` (product of view.shape[:ndim],
 * computed once and cached in self._size); the __len__ slot begins at the
 * end.  Do not hand-edit: regenerate from the Cython sources. */
NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":536 * @cname('__pyx_memoryview_get_ndim') * def __get__(self): * return self.view.ndim # <<<<<<<<<<<<<< * * property itemsize: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 536; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":535 * property ndim: * @cname('__pyx_memoryview_get_ndim') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":540 * property itemsize: * @cname('__pyx_memoryview_get_itemsize') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":541 *
@cname('__pyx_memoryview_get_itemsize') * def __get__(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * property nbytes: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 541; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":540 * property itemsize: * @cname('__pyx_memoryview_get_itemsize') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":545 * property nbytes: * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":546 * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): * return self.size * self.view.itemsize #
<<<<<<<<<<<<<< * * property size: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":545 * property nbytes: * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":550 * property size: * @cname('__pyx_memoryview_get_size') * def __get__(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject
*__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":551 * @cname('__pyx_memoryview_get_size') * def __get__(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":552 * def __get__(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":554 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":555 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 555; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":557 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return
self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":559 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":550 * property size: * @cname('__pyx_memoryview_get_size') * def __get__(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":561 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":562 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":563 * def
* NOTE(review): Cython-generated continuation.  This chunk finishes the
* __len__ slot (returns view.shape[0] when ndim >= 1, else 0 -- note: no
* error path, plain Py_ssize_t return), then implements __repr__
* ("<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, id(self)))
* and __str__ ("<MemoryView of %r object>" % (self.base.__class__.__name__,)),
* and begins the is_c_contig method, which is truncated at this chunk
* boundary (mid string literal) and continues in the next chunk.
* Do not hand-edit: regenerate from the Cython sources.
__len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; } /* "View.MemoryView":565 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":561 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":568 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 =
__Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":569 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":568 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":567 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":571 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":572 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 =
__Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":571 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":575 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig
(wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":578 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice, 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":579 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice, 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":581 * return slice_is_contig(mslice, 'C', self.view.ndim) * * def is_f_contig(self): # 
<<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":584 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice, 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":585 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice, 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 585; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":581 * return slice_is_contig(mslice, 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< 
* cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":587 * return slice_is_contig(mslice, 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":589 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":591 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* 
"View.MemoryView":592 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), __pyx_k_c, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":597 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":587 * return slice_is_contig(mslice, 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":599 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":601 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":603 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":604 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), __pyx_k_fortran, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":609 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, 
(&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":599 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":613 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":614 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if 
(unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":615 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":616 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":613 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); 
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":620 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, ((PyObject *)__pyx_memoryview_type)); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":619 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":622 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 
0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":627 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":628 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 628; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":630 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":632 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":633 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":634 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":635 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { 
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; 
__Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = PyNumber_Add(__pyx_t_3, __pyx_int_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":636 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":637 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":638 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 
0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":639 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; goto __pyx_L7; } /*else*/ { /* "View.MemoryView":641 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 641; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L7:; /* "View.MemoryView":642 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; goto __pyx_L6; } /*else*/ { /* "View.MemoryView":644 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":645 * else: * if not 
isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":647 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":648 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L6:; /* 
"View.MemoryView":635 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":650 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":651 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":652 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__17); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__17); __Pyx_GIVEREF(__pyx_slice__17); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L13; } __pyx_L13:; /* "View.MemoryView":654 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = 
/* NOTE(review): Cython-generated C (module "View.MemoryView"). Do not hand-edit:
 * fix the originating .pyx and regenerate. This span carries the exit path of
 * _unellipsify(), the whole of assert_direct_dimensions(), and the entry point
 * plus local declarations of memview_slice(). Only comments were added. */
((PyObject*)__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L0; /* "View.MemoryView":622 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":656 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":657 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":658 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not
/* assert_direct_dimensions: scans the first `ndim` entries of `suboffsets` and
 * raises ValueError ("Indirect dimensions not supported") if any is >= 0;
 * returns None (Py_None) when all dimensions are direct. */
supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":659 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } /* "View.MemoryView":656 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step;
/* NOTE(review): Cython-generated body of memview_slice() ("View.MemoryView":666).
 * Do not hand-edit; regenerate from the .pyx. For each entry of `indices` it
 * handles one of three cases per dimension: an integer index (forwarded to
 * slice_memviewslice with is_slice=0), None (inserts a length-1 dimension with
 * stride 0 and suboffset -1), or a slice object (start/stop/step read with
 * `or 0` defaults plus have_start/have_stop/have_step flags, is_slice=1).
 * Only comments were added below; all code tokens are unchanged. */
PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":667 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":674 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))); /* "View.MemoryView":678 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /* "View.MemoryView":680 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type)); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":681 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) ||
likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":682 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); goto __pyx_L3; } /*else*/ { /* "View.MemoryView":684 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":685 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":691 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":692 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":697 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":698 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":702 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if
(likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":703 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":707 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":704 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } /* "View.MemoryView":710 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":711 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /*
"View.MemoryView":712 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":713 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1; /* "View.MemoryView":714 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); goto __pyx_L6; } /*else*/ { /* "View.MemoryView":716 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":717 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":718 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":720 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 720; __pyx_clineno =
__LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":721 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":722 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 722; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":724 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":730 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */
/* After the loop: wrap the populated `dst` slice in a new memoryview object;
 * the _memoryviewslice branch forwards to_object_func/to_dtype_func. */
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":702 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":732 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type)); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":733 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":734 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":735 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 735; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":733 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func,
__pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 733; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 733; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /*else*/ { /* "View.MemoryView":738 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":739 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 738; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":738 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 738; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":666 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":763 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":783 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":785 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":786 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); goto __pyx_L4; } __pyx_L4:; /* "View.MemoryView":787 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":788 * start += shape * if not 0 <= start
/* NOTE(review): Cython-generated remainder of slice_memviewslice()
 * ("View.MemoryView":763) plus the head of pybuffer_index(). slice_memviewslice
 * normalizes start/stop/step against `shape` (clamping slice bounds, erroring on
 * out-of-range integer indexes and zero steps), computes the new extent with C
 * division (cython.cdivision(True)), writes dst strides/shape/suboffsets, and
 * advances dst.data (or the pending suboffset) by start*stride; returns 0 on
 * success, -1 on error, acquiring the GIL only to record the traceback.
 * Do not hand-edit; regenerate from the .pyx. Only comments were added. */
< shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":791 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":793 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":794 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "View.MemoryView":797 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":798 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":799 * if have_start: * if start < 0: *
start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":800 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":801 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; goto __pyx_L13; } __pyx_L13:; goto __pyx_L12; } /* "View.MemoryView":802 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":803 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":804 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); goto __pyx_L14; } /*else*/ { /* "View.MemoryView":806 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_start = __pyx_v_shape; } __pyx_L14:; goto __pyx_L12; } __pyx_L12:; goto __pyx_L11; } /*else*/ { /* "View.MemoryView":808 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":809 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); goto __pyx_L15; } /*else*/ { /* "View.MemoryView":811 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":813 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /*
"View.MemoryView":814 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":815 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":816 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":817 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; goto __pyx_L18; } __pyx_L18:; goto __pyx_L17; } /* "View.MemoryView":818 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":819 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; goto __pyx_L17; } __pyx_L17:; goto __pyx_L16; } /*else*/ { /* "View.MemoryView":821 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":822 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1; goto __pyx_L19; } /*else*/ { /* "View.MemoryView":824 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":826 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":827 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; goto __pyx_L20; } __pyx_L20:; /* "View.MemoryView":831 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) -
step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":833 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":834 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); goto __pyx_L21; } __pyx_L21:; /* "View.MemoryView":836 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":837 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; goto __pyx_L22; } __pyx_L22:; /* "View.MemoryView":840 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":841 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":842 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":845 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":846 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); goto __pyx_L23; } /*else*/ { /*
"View.MemoryView":848 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":850 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":851 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":852 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); goto __pyx_L26; } /*else*/ { /* "View.MemoryView":855 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 855; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L26:; goto __pyx_L25; } /*else*/ { /* "View.MemoryView":858 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; }
__pyx_L25:; goto __pyx_L24; } __pyx_L24:; /* "View.MemoryView":860 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":763 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":866 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":868 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1; /* "View.MemoryView":869 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":872 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":873 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[2]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[2]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":874 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":876 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":877 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":878 * shape = view.shape[dim] * stride = 
view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":879 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); goto __pyx_L4; } __pyx_L4:; } __pyx_L3:; /* "View.MemoryView":881 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":882 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":883 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":884 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); 
__Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } goto __pyx_L5; } __pyx_L5:; /* "View.MemoryView":886 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":887 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":889 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":890 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":891 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); goto __pyx_L8; } __pyx_L8:; /* "View.MemoryView":893 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":866 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":899 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t 
*__pyx_t_2; long __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":900 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":902 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":903 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":907 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":908 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":909 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":910 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # 
<<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4; /* "View.MemoryView":912 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L6_bool_binop_done; } __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L6_bool_binop_done:; if (__pyx_t_6) { /* "View.MemoryView":913 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, __pyx_k_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 913; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; } /* "View.MemoryView":915 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":899 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":932 * cdef int (*to_dtype_func)(char 
*, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* NOTE(review): machine-generated by Cython from the View.MemoryView .pyx source quoted in the interleaved comments; do not hand-edit — change the .pyx and regenerate instead. */ /* Python wrapper */ /* tp_dealloc entry for _memoryviewslice: casts the PyObject to the concrete struct and forwards to the implementation function below. */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } /* Implementation of _memoryviewslice.__dealloc__: releases the reference this object holds via from_slice (mirrors the one-line __dealloc__ body quoted above). */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":933 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ /* Drops the memview reference recorded in from_slice; __PYX_XDEC_MEMVIEW is the Cython runtime macro — presumably safe on an already-cleared slice, TODO confirm against the module's utility code. */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":932 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":935 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* convert_item_to_object: turns the raw item bytes at itemp into a Python object, preferring the slice's own to_object_func when set (body continues on the following source line). */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":936 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 =
((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":937 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 937; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /*else*/ { /* "View.MemoryView":939 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 939; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":935 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":941 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char 
*__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":942 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":943 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 943; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } /*else*/ { /* "View.MemoryView":945 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * property base: */ __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 945; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":941 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":949 * property base: * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* NOTE(review): machine-generated by Cython; edit the .pyx source quoted in the comments, not this C. */ /* Python wrapper */ /* Getter for the `base` property of _memoryviewslice: unwraps the PyObject and forwards to the implementation below. */ static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: returns a new reference to self.from_object (the INCREF below pairs with the reference handed to the caller). */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":950 * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":949 * property base: * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":956 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* memoryview_fromslice: builds a new _memoryviewslice Python object around a C-level __Pyx_memviewslice (implementation continues on the following source lines). */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int
__pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":964 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":965 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; } /* "View.MemoryView":970 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 970; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 970; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_GIVEREF(Py_None); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryviewslice_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 970; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":972 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":973 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":975 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 975; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":976 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":978 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":979 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * 
result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":980 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":981 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":982 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * result.flags = PyBUF_RECORDS */ Py_INCREF(Py_None); /* "View.MemoryView":984 * Py_INCREF(Py_None) * * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":986 * result.flags = PyBUF_RECORDS * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":987 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":990 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":991 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< 
* if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":992 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":993 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":994 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L5_break; } } __pyx_L5_break:; /* "View.MemoryView":996 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":997 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 997; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); 
__pyx_t_2 = 0; /* "View.MemoryView":998 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1000 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1001 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1003 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":956 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); 
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1006 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1009 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type)); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1010 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1011 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * 
slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; } /*else*/ { /* "View.MemoryView":1013 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1014 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1006 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1017 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1021 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1022 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = 
memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1023 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1025 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1026 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1028 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_dim = __pyx_t_3; /* "View.MemoryView":1029 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1030 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1031 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_4 = 
(__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_4 = -1; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4; } /* "View.MemoryView":1017 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1034 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1037 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1038 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1034 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ 
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1041 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1048 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type)); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1049 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1050 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * 
to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":1052 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ __pyx_v_to_object_func = NULL; /* "View.MemoryView":1053 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1055 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1057 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1055; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1041 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1064 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1065 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; } /*else*/ { /* "View.MemoryView":1067 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1063 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1070 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1075 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1076 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1078 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1079 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1080 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1081 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; } } __pyx_L4_break:; /* "View.MemoryView":1083 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1084 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1085 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = 
mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1086 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; } } __pyx_L7_break:; /* "View.MemoryView":1088 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1089 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; } /*else*/ { /* "View.MemoryView":1091 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1070 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1094 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; /* "View.MemoryView":1101 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1102 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1103 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1104 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1106 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if 
(src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1107 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1108 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":1109 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)); goto __pyx_L4; } /*else*/ { /* "View.MemoryView":1111 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1112 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, 
__pyx_v_itemsize); /* "View.MemoryView":1113 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1114 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":1116 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1117 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1121 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1122 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1094 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* 
"View.MemoryView":1124 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1127 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1124 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1131 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1134 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1136 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* 
"View.MemoryView":1137 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1139 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1131 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1142 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1151 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1152 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_idx = __pyx_t_3; /* "View.MemoryView":1153 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1154 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * 
(__pyx_v_shape[__pyx_v_idx])); } goto __pyx_L3; } /*else*/ { /* "View.MemoryView":1156 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1157 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1158 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1160 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1142 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1163 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1174 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * 
cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1175 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1177 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1178 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1179 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1179; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":1182 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1183 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1184 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1185 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * 
tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1186 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1; } /* "View.MemoryView":1188 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order); /* "View.MemoryView":1192 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1193 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1194 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src, order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; goto __pyx_L8; } __pyx_L8:; } /* "View.MemoryView":1196 * tmpslice.strides[i] = 0 * * if slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1197 * * if slice_is_contig(src, order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size); goto __pyx_L9; } /*else*/ { /* "View.MemoryView":1199 * memcpy(result, src.data, size) * else: * 
copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1201 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1163 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1206 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1209 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1208 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1206 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1212 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1213 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = 
__Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = NULL; PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1212 * * @cname('__pyx_memoryview_err_dim') * cdef 
int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1216 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1217 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":1218 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = 
__pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = NULL; PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /*else*/ { /* "View.MemoryView":1220 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ __Pyx_Raise(__pyx_v_error, 0, 0, 0); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":1216 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); 
__Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1223 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; void *__pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1231 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1232 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1234 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1235 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1236 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1239 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1240 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); goto __pyx_L3; } /* "View.MemoryView":1241 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* 
"View.MemoryView":1242 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":1244 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1246 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1247 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1248 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1249 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1250 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; goto __pyx_L7; } /*else*/ { /* "View.MemoryView":1252 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if 
src.suboffsets[i] >= 0: */ __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1252; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L7:; goto __pyx_L6; } __pyx_L6:; /* "View.MemoryView":1254 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1255 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Dimension_d_is_not_direct, __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1255; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; } /* "View.MemoryView":1257 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(&src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1259 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(&src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig((&__pyx_v_src), __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1260 * * if not slice_is_contig(&src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); goto __pyx_L10; } __pyx_L10:; /* "View.MemoryView":1262 * order = 
get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_tmpdata = __pyx_t_6; /* "View.MemoryView":1263 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; goto __pyx_L9; } __pyx_L9:; /* "View.MemoryView":1265 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1268 * * * if slice_is_contig(&src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1269 * * if slice_is_contig(&src, 'C', ndim): * direct_copy = slice_is_contig(&dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(&src, 'F', ndim): * direct_copy = slice_is_contig(&dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'C', __pyx_v_ndim); goto __pyx_L12; } /* "View.MemoryView":1270 * if slice_is_contig(&src, 'C', ndim): * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1271 * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): * direct_copy = slice_is_contig(&dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'F', __pyx_v_ndim); goto __pyx_L12; } __pyx_L12:; 
/* "View.MemoryView":1273 * direct_copy = slice_is_contig(&dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1275 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1276 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)); /* "View.MemoryView":1277 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1278 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1279 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; } goto __pyx_L11; } __pyx_L11:; /* "View.MemoryView":1281 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_7 = (__pyx_t_2 != 0); if (__pyx_t_7) { /* "View.MemoryView":1284 * * * 
transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1284; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1285 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1285; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L14; } __pyx_L14:; /* "View.MemoryView":1287 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1288 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1289 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1291 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1292 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1223 * * @cname('__pyx_memoryview_copy_contents') * cdef int 
memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1295 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; /* "View.MemoryView":1299 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1301 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1302 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1303 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = 
(__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1304 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1306 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1307 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1308 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1309 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1; } /* "View.MemoryView":1295 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1317 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1321 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * 
dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1322 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":1317 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1326 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1329 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1326 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1332 * * 
@cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1336 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1337 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_3 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_3) { /* "View.MemoryView":1338 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_3 = (__pyx_v_inc != 0); if (__pyx_t_3) { /* "View.MemoryView":1339 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); goto __pyx_L6; } /*else*/ { /* "View.MemoryView":1341 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; goto __pyx_L5; } /*else*/ { /* "View.MemoryView":1343 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } 
__pyx_L5:; /* "View.MemoryView":1346 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1332 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1352 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1355 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1356 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1358 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1352 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice 
*dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; /* "View.MemoryView":1366 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1367 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1369 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1370 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1371 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize); /* "View.MemoryView":1372 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } goto __pyx_L3; } 
/*else*/ { /* "View.MemoryView":1374 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1375 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1377 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1362 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) { Py_DECREF(o); o = 0; } return o; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); 
++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return get_memview(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, 0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { 0, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { 0, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, 
/*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "GPy.util.choleskies_cython.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #else 0, /*reserved*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if 
(unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "GPy.util.choleskies_cython.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #else 0, /*reserved*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX 
>= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) { Py_DECREF(o); o = 0; } return o; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = 
Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_transpose(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview__get__base(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_shape(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_strides(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_suboffsets(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_ndim(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_itemsize(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_nbytes(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_size(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 
0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, 0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, 0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, 0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, 0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, 0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, 0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, 0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, 0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, 0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "GPy.util.choleskies_cython.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ 
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #else 0, /*reserved*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if 
(unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryviewslice__get__base(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, 0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "GPy.util.choleskies_cython._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #else 0, /*reserved*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if 
CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "choleskies_cython", 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1}, 
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_n_s_GPy_util_choleskies_cython, __pyx_k_GPy_util_choleskies_cython, sizeof(__pyx_k_GPy_util_choleskies_cython), 0, 0, 1, 1}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_L, __pyx_k_L, sizeof(__pyx_k_L), 0, 0, 1, 1}, {&__pyx_n_s_L_cont, __pyx_k_L_cont, sizeof(__pyx_k_L_cont), 0, 0, 1, 1}, {&__pyx_n_s_M, __pyx_k_M, sizeof(__pyx_k_M), 0, 0, 1, 1}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, 
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1}, {&__pyx_n_s_ascontiguousarray, __pyx_k_ascontiguousarray, sizeof(__pyx_k_ascontiguousarray), 0, 0, 1, 1}, {&__pyx_n_s_backprop_gradient, __pyx_k_backprop_gradient, sizeof(__pyx_k_backprop_gradient), 0, 0, 1, 1}, {&__pyx_n_s_backprop_gradient_par, __pyx_k_backprop_gradient_par, sizeof(__pyx_k_backprop_gradient_par), 0, 0, 1, 1}, {&__pyx_n_s_backprop_gradient_par_c, __pyx_k_backprop_gradient_par_c, sizeof(__pyx_k_backprop_gradient_par_c), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1}, {&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1}, {&__pyx_n_s_dL, __pyx_k_dL, sizeof(__pyx_k_dL), 0, 0, 1, 1}, {&__pyx_n_s_dL_dK, __pyx_k_dL_dK, sizeof(__pyx_k_dL_dK), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, 
__pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_flat, __pyx_k_flat, sizeof(__pyx_k_flat), 0, 0, 1, 1}, {&__pyx_n_s_flat_to_triang, __pyx_k_flat_to_triang, sizeof(__pyx_k_flat_to_triang), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_k_home_james_work_GPy_GPy_util_ch, sizeof(__pyx_k_home_james_work_GPy_GPy_util_ch), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mm, __pyx_k_mm, sizeof(__pyx_k_mm), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, 
__pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_ret, __pyx_k_ret, sizeof(__pyx_k_ret), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_triang_to_flat, __pyx_k_triang_to_flat, sizeof(__pyx_k_triang_to_flat), 0, 0, 1, 1}, {&__pyx_n_s_tril, __pyx_k_tril, sizeof(__pyx_k_tril), 0, 0, 1, 
1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_xrange, __pyx_k_xrange, sizeof(__pyx_k_xrange), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 __pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_enumerate = 
__Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # 
<<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if 
(unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":127 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":130 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if isinstance(format, unicode): */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":142 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":170 * 
self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":186 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":445 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "View.MemoryView":521 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "View.MemoryView":529 * def __get__(self): * if 
self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__14 = PyTuple_New(1); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__14); __Pyx_INCREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__14, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":638 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":641 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 641; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); /* "View.MemoryView":652 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_slice__17 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__17)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_slice__17); __Pyx_GIVEREF(__pyx_slice__17); /* "View.MemoryView":659 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__18 = PyTuple_Pack(1, 
__pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__18)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "GPy/util/choleskies_cython.pyx":12 * cimport scipy.linalg.cython_blas as cblas * * def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<< * """take a matrix N x D and return a D X M x M array where * */ __pyx_tuple__19 = PyTuple_Pack(9, __pyx_n_s_flat, __pyx_n_s_M, __pyx_n_s_D, __pyx_n_s_N, __pyx_n_s_count, __pyx_n_s_ret, __pyx_n_s_d, __pyx_n_s_m, __pyx_n_s_mm); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(2, 0, 9, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_n_s_flat_to_triang, 12, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "GPy/util/choleskies_cython.pyx":33 * return ret * * def triang_to_flat(double[:, :, :] L): # <<<<<<<<<<<<<< * cdef int D = L.shape[0] * cdef int M = L.shape[1] */ __pyx_tuple__21 = PyTuple_Pack(10, __pyx_n_s_L, __pyx_n_s_L, __pyx_n_s_D, __pyx_n_s_M, __pyx_n_s_N, __pyx_n_s_count, __pyx_n_s_flat, __pyx_n_s_d, __pyx_n_s_m, __pyx_n_s_mm); if (unlikely(!__pyx_tuple__21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(1, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_n_s_triang_to_flat, 33, 
__pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "GPy/util/choleskies_cython.pyx":49 * return flat * * def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ __pyx_tuple__23 = PyTuple_Pack(7, __pyx_n_s_dL, __pyx_n_s_L, __pyx_n_s_dL_dK, __pyx_n_s_N, __pyx_n_s_k, __pyx_n_s_j, __pyx_n_s_i); if (unlikely(!__pyx_tuple__23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_n_s_backprop_gradient, 49, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "GPy/util/choleskies_cython.pyx":65 * return dL_dK * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<< * cdef double[:,::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ __pyx_tuple__25 = PyTuple_Pack(7, __pyx_n_s_dL, __pyx_n_s_L, __pyx_n_s_dL_dK, __pyx_n_s_N, __pyx_n_s_k, __pyx_n_s_j, __pyx_n_s_i); if (unlikely(!__pyx_tuple__25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_n_s_backprop_gradient_par, 65, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} /* "GPy/util/choleskies_cython.pyx":108 * dL[k, k] /= (2.0 * L[k, k]) * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) */ __pyx_tuple__27 = PyTuple_Pack(5, __pyx_n_s_dL, __pyx_n_s_L, __pyx_n_s_dL_dK, __pyx_n_s_L_cont, __pyx_n_s_N); if (unlikely(!__pyx_tuple__27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(2, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_n_s_backprop_gradient_par_c, 108, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":276 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__29)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); /* "View.MemoryView":277 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__30)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__30); __Pyx_GIVEREF(__pyx_tuple__30); /* 
"View.MemoryView":278 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__31)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__31); __Pyx_GIVEREF(__pyx_tuple__31); /* "View.MemoryView":281 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__32 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__32)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__32); __Pyx_GIVEREF(__pyx_tuple__32); /* "View.MemoryView":282 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__33 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__33)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__33); __Pyx_GIVEREF(__pyx_tuple__33); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_int_1 = PyInt_FromLong(1); 
if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initcholeskies_cython(void); /*proto*/ PyMODINIT_FUNC initcholeskies_cython(void) #else PyMODINIT_FUNC PyInit_choleskies_cython(void); /*proto*/ PyMODINIT_FUNC PyInit_choleskies_cython(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_choleskies_cython(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("choleskies_cython", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif if (__pyx_module_is_main_GPy__util__choleskies_cython) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "GPy.util.choleskies_cython")) { if (unlikely(PyDict_SetItemString(modules, "GPy.util.choleskies_cython", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type___pyx_array) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_array.tp_print = 0; __pyx_array_type = &__pyx_type___pyx_array; if 
(PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 269; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_MemviewEnum.tp_print = 0; __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_memoryview.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject 
*(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 921; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_memoryviewslice.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 921; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if 
(unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 864; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ __pyx_t_1 = __Pyx_ImportModule("scipy.linalg.cython_blas"); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_ImportFunction(__pyx_t_1, "ddot", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_ddot, "__pyx_t_5scipy_6linalg_11cython_blas_d (int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_ImportFunction(__pyx_t_1, "dscal", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_dscal, "void (int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_ImportFunction(__pyx_t_1, "dsymv", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_dsymv, "void (char *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*--- Execution code ---*/ /* "GPy/util/choleskies_cython.pyx":7 * # Copyright James Hensman and Alan Saul 2015 * * import numpy as np # <<<<<<<<<<<<<< * from cython.parallel import prange, parallel * cimport numpy as np */ 
__pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "GPy/util/choleskies_cython.pyx":12 * cimport scipy.linalg.cython_blas as cblas * * def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<< * """take a matrix N x D and return a D X M x M array where * */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_1flat_to_triang, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_flat_to_triang, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "GPy/util/choleskies_cython.pyx":33 * return ret * * def triang_to_flat(double[:, :, :] L): # <<<<<<<<<<<<<< * cdef int D = L.shape[0] * cdef int M = L.shape[1] */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_3triang_to_flat, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_triang_to_flat, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "GPy/util/choleskies_cython.pyx":49 * return flat * * def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ __pyx_t_2 = 
PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_5backprop_gradient, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_backprop_gradient, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "GPy/util/choleskies_cython.pyx":65 * return dL_dK * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<< * cdef double[:,::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_7backprop_gradient_par, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_backprop_gradient_par, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "GPy/util/choleskies_cython.pyx":108 * dL[k, k] /= (2.0 * L[k, k]) * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_backprop_gradient_par_c, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* 
"GPy/util/choleskies_cython.pyx":1 * #cython: wraparaound=False # <<<<<<<<<<<<<< * #cython: boundscheck=False * #cython: nonecheck=False */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":203 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":276 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":277 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and 
indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":278 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":281 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":282 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__33, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2); 
__Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":496 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":952 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "View.MemoryView":1362 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init GPy.util.choleskies_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { 
PyErr_SetString(PyExc_ImportError, "init GPy.util.choleskies_cython");
    }
    /* Common exit for module init: pop the refnanny context and return. */
    __pyx_L0:;
    __Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
    return;
#else
    return __pyx_m;
#endif
}

/* --- Runtime support code --- */
#if CYTHON_REFNANNY
/* Import the RefNanny refcount-debugging API from module `modname`.
   Returns NULL (with no exception guarantee) if the module or its
   "RefNannyAPI" attribute cannot be loaded. */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    PyObject *m = NULL, *p = NULL;
    void *r = NULL;
    m = PyImport_ImportModule((char *)modname);
    if (!m) goto end;
    p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
    if (!p) goto end;
    r = PyLong_AsVoidPtr(p);
end:
    Py_XDECREF(p);
    Py_XDECREF(m);
    return (__Pyx_RefNannyAPIStruct *)r;
}
#endif

/* Look up `name` in the builtins module (__pyx_b); sets NameError and
   returns NULL when the name is not defined. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
    if (unlikely(!result)) {
        PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
            "name '%U' is not defined", name);
#else
            "name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
    }
    return result;
}

/* Raise a TypeError describing a wrong positional-argument count:
   `exact` selects "exactly", otherwise "at least"/"at most" is chosen
   from num_min/num_max versus num_found. */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    Py_ssize_t num_expected;
    const char *more_or_less;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    } else {
        num_expected = num_max;
        more_or_less = "at most";
    }
    if (exact) {
        more_or_less = "exactly";
    }
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T
                 "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ?
"" : "s", num_found);
}

/* Raise TypeError when keyword `kw_name` duplicates a positional argument
   already received by `func_name`. */
static void __Pyx_RaiseDoubleKeywordsError(
    const char* func_name,
    PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
        "%s() got multiple values for keyword argument '%s'", func_name,
        PyString_AsString(kw_name));
#endif
}

/* Match the keywords in `kwds` against the expected argument names in
   `argnames` (NULL-terminated), storing matches into `values`.  Unknown
   keywords go into `kwds2` when given, otherwise raise TypeError.
   Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_ParseOptionalKeywords(
    PyObject *kwds,
    PyObject **argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject *key = 0, *value = 0;
    Py_ssize_t pos = 0;
    PyObject*** name;
    PyObject*** first_kw_arg = argnames + num_pos_args;
    while (PyDict_Next(kwds, &pos, &key, &value)) {
        /* Fast path: interned-string pointer comparison. */
        name = first_kw_arg;
        while (*name && (**name != key)) name++;
        if (*name) {
            values[name-argnames] = value;
            continue;
        }
        name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
        /* Py2: compare byte-string keywords by size then content. */
        if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
            while (*name) {
                if ((CYTHON_COMPILING_IN_PYPY ||
                        PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
                        && _PyString_Eq(**name, key)) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* Keyword matches an argument already bound positionally. */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    if ((**argname == key) || (
                            (CYTHON_COMPILING_IN_PYPY ||
                             PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
                            && _PyString_Eq(**argname, key))) {
                        goto arg_passed_twice;
                    }
                    argname++;
                }
            }
        } else
#endif
        if (likely(PyUnicode_Check(key))) {
            while (*name) {
                int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                    (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
                    PyUnicode_Compare(**name, key);
                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                if (cmp == 0) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    int cmp = (**argname == key) ?
0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                        (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
                        PyUnicode_Compare(**argname, key);
                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                    if (cmp == 0) goto arg_passed_twice;
                    argname++;
                }
            }
        } else
            goto invalid_keyword_type;
        /* Unmatched keyword: forward into **kwargs dict or raise. */
        if (kwds2) {
            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
        } else {
            goto invalid_keyword;
        }
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
invalid_keyword_type:
    PyErr_Format(PyExc_TypeError,
        "%.200s() keywords must be strings", function_name);
    goto bad;
invalid_keyword:
    PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
        "%.200s() got an unexpected keyword argument '%.200s'",
        function_name, PyString_AsString(key));
#else
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
#endif
bad:
    return -1;
}

/* Look up `name` in the module's dict (__pyx_d), falling back to the
   builtins; returns a new reference or NULL with NameError set. */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
    PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON
    result = PyDict_GetItem(__pyx_d, name);
    if (likely(result)) {
        Py_INCREF(result);
    } else {
#else
    result = PyObject_GetItem(__pyx_d, name);
    if (!result) {
        PyErr_Clear();
#endif
        result = __Pyx_GetBuiltinName(name);
    }
    return result;
}

#if CYTHON_COMPILING_IN_CPYTHON
/* Call `func(arg, kw)` directly via tp_call with recursion checking; also
   normalises a NULL result without an exception into SystemError. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *result;
    ternaryfunc call = func->ob_type->tp_call;
    if (unlikely(!call))
        return PyObject_Call(func, arg, kw);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = (*call)(func, arg, kw);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif

#if CYTHON_COMPILING_IN_CPYTHON
/* Fast path: invoke a METH_O C function with a single argument, bypassing
   tuple packing. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc =
PyCFunction_GET_FUNCTION(func);
    self = PyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif

#if CYTHON_COMPILING_IN_CPYTHON
/* Slow path: pack `arg` into a fresh 1-tuple and dispatch through
   __Pyx_PyObject_Call; the tuple is released before returning. */
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    Py_INCREF(arg);
    PyTuple_SET_ITEM(args, 0, arg);
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}

/* Call `func(arg)`, taking the METH_O fast path when `func` is a C
   function (or Cython function) that accepts exactly one argument. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#ifdef __Pyx_CyFunction_USED
    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
    if (likely(PyCFunction_Check(func))) {
#endif
        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
            return __Pyx_PyObject_CallMethO(func, arg);
        }
    }
    return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
/* Non-CPython fallback: always pack the argument into a tuple. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject* args = PyTuple_Pack(1, arg);
    return (likely(args)) ?
__Pyx_PyObject_Call(func, args, NULL) : NULL; } #endif static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? 
"'complex float'" : "'float'");
        case 'd': return (is_complex ? "'complex double'" : "'double'");
        case 'g': return (is_complex ? "'complex long double'" : "'long double'");
        case 'T': return "a struct";
        case 'O': return "Python object";
        case 'P': return "a pointer";
        case 's': case 'p': return "a string";
        case 0: return "end";
        default: return "unparseable format string";
    }
}

/* Size in bytes of a type code under standard (fixed-size) packing modes
   ('<', '>', '!', '='); complex types are twice the scalar size. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return 2;
        case 'i': case 'I': case 'l': case 'L': return 4;
        case 'q': case 'Q': return 8;
        case 'f': return (is_complex ? 8 : 4);
        case 'd': return (is_complex ? 16 : 8);
        case 'g': {
            /* 'g' has no standard size; only valid in native mode. */
            PyErr_SetString(PyExc_ValueError,
                "Python does not define a standard format string size for long double ('g')..");
            return 0;
        }
        case 'O': case 'P': return sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}

/* Size in bytes of a type code under native packing ('@', '^'),
   using the compiler's own type sizes. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    switch (ch) {
        case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(short);
        case 'i': case 'I': return sizeof(int);
        case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(float) * (is_complex ? 2 : 1);
        case 'd': return sizeof(double) * (is_complex ? 2 : 1);
        case 'g': return sizeof(long double) * (is_complex ?
2 : 1);
        case 'O': case 'P': return sizeof(void*);
        default: {
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
        }
    }
}

/* Helper structs: sizeof(__Pyx_st_X) - sizeof(X) yields the natural
   alignment of type X on this compiler (offset of x after a leading char). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif

/* Native alignment requirement (in bytes) of a format type code. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
        case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
        case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
        case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
        case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
        case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}

/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as
   above, but we don't have any guarantees.
*/
/* Helper structs: sizeof(__Pyx_pad_X) - sizeof(X) yields the trailing
   padding inserted after a field of type X followed by a char. */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif

/* Trailing struct padding (in bytes) implied by a format type code. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
        case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
        case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
        case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
        case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
        case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}

/* Classify a type code into a group character:
   'H' char, 'I' signed int, 'U' unsigned int, 'R' real, 'C' complex,
   'O' object, 'P' pointer.  Used to compare declared vs. actual dtypes. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    switch (ch) {
        case 'c':
            return 'H';
        case 'b': case 'h': case 'i':
        case 'l': case 'q': case 's': case 'p':
            return 'I';
        case 'B': case 'H': case 'I': case 'L': case 'Q':
            return 'U';
        case 'f': case 'd': case 'g':
            return (is_complex ?
'C' : 'R');
        case 'O':
            return 'O';
        case 'P':
            return 'P';
        default: {
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
        }
    }
}

/* Set a ValueError describing a dtype mismatch between the expected field
   (from ctx->head) and the type chunk just parsed. */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
    if (ctx->head == NULL || ctx->head->field == &ctx->root) {
        const char* expected;
        const char* quote;
        if (ctx->head == NULL) {
            expected = "end";
            quote = "";
        } else {
            expected = ctx->head->field->type->name;
            quote = "'";
        }
        PyErr_Format(PyExc_ValueError,
                     "Buffer dtype mismatch, expected %s%s%s but got %s",
                     quote, expected, quote,
                     __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
    } else {
        __Pyx_StructField* field = ctx->head->field;
        __Pyx_StructField* parent = (ctx->head - 1)->field;
        PyErr_Format(PyExc_ValueError,
                     "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
                     field->type->name,
                     __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
                     parent->type->name, field->name);
    }
}

/* Consume the pending "type chunk" (ctx->enc_type repeated ctx->enc_count
   times) and match it against the expected field(s), advancing the field
   cursor and the running format offset.  Returns 0 on success, -1 with a
   ValueError set on mismatch. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
    char group;
    size_t size, offset, arraysize = 1;
    if (ctx->enc_type == 0) return 0;
    /* Array fields: validate dimensionality and total element count. */
    if (ctx->head->field->type->arraysize[0]) {
        int i, ndim = 0;
        if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
            ctx->is_valid_array = ctx->head->field->type->ndim == 1;
            ndim = 1;
            if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
                PyErr_Format(PyExc_ValueError,
                             "Expected a dimension of size %zu, got %zu",
                             ctx->head->field->type->arraysize[0], ctx->enc_count);
                return -1;
            }
        }
        if (!ctx->is_valid_array) {
            PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
                         ctx->head->field->type->ndim, ndim);
            return -1;
        }
        for (i = 0; i < ctx->head->field->type->ndim; i++) {
            arraysize *= ctx->head->field->type->arraysize[i];
        }
        ctx->is_valid_array = 0;
        ctx->enc_count = 1;
    }
    group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
    do {
        __Pyx_StructField* field = ctx->head->field;
        __Pyx_TypeInfo* type = field->type;
        if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
            size =
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
        } else {
            size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
        }
        /* Native packing: advance fmt_offset to the type's alignment. */
        if (ctx->enc_packmode == '@') {
            size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
            size_t align_mod_offset;
            if (align_at == 0) return -1;
            align_mod_offset = ctx->fmt_offset % align_at;
            if (align_mod_offset > 0)
                ctx->fmt_offset += align_at - align_mod_offset;
            if (ctx->struct_alignment == 0)
                ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex);
        }
        if (type->size != size || type->typegroup != group) {
            /* Complex expected: descend into its component fields. */
            if (type->typegroup == 'C' && type->fields != NULL) {
                size_t parent_offset = ctx->head->parent_offset + field->offset;
                ++ctx->head;
                ctx->head->field = type->fields;
                ctx->head->parent_offset = parent_offset;
                continue;
            }
            if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
                /* Char/byte groups of equal size are interchangeable. */
            } else {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return -1;
            }
        }
        offset = ctx->head->parent_offset + field->offset;
        if (ctx->fmt_offset != offset) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T
                         "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
                         (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
            return -1;
        }
        ctx->fmt_offset += size;
        if (arraysize)
            ctx->fmt_offset += (arraysize - 1) * size;
        --ctx->enc_count;
        /* Advance to the next expected field, unwinding/descending the
           struct stack as needed. */
        while (1) {
            if (field == &ctx->root) {
                ctx->head = NULL;
                if (ctx->enc_count != 0) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return -1;
                }
                break;
            }
            ctx->head->field = ++field;
            if (field->type == NULL) {
                --ctx->head;
                field = ctx->head->field;
                continue;
            } else if (field->type->typegroup == 'S') {
                size_t parent_offset = ctx->head->parent_offset + field->offset;
                if (field->type->fields->type == NULL) continue;
                field = field->type->fields;
                ++ctx->head;
                ctx->head->field = field;
                ctx->head->parent_offset = parent_offset;
                break;
            } else {
                break;
            }
        }
    } while (ctx->enc_count);
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    return 0;
}

static
CYTHON_INLINE PyObject *
/* Parse a "(d1,d2,...)" array-shape suffix in a buffer format string,
   checking each dimension against the expected arraysize.  Returns Py_None
   (borrowed, used as a success flag) or NULL with ValueError set. */
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) {
    const char *ts = *tsp;
    int i = 0, number;
    int ndim = ctx->head->field->type->ndim;
    ;
    ++ts;
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    while (*ts && *ts != ')') {
        switch (*ts) {
            /* NOTE(review): these whitespace cases `continue` without
               advancing ts — would loop forever if whitespace ever appeared
               inside the parens; in practice format strings contain none.
               Left as generated. */
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue;
            default: break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                                "Expected a dimension of size %zu, got %d",
                                ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;
    return Py_None;
}

/* Validate the buffer format string `ts` against the expected dtype held
   in `ctx`.  Returns a pointer just past the validated portion, or NULL
   with ValueError set on mismatch. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
    int got_Z = 0;
    while (1) {
        switch(*ts) {
            case 0:
                /* End of string: flush the pending chunk and require all
                   expected fields to be consumed. */
                if (ctx->enc_type != 0 && ctx->head == NULL) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return NULL;
                }
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                if (ctx->head != NULL) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return NULL;
                }
                return ts;
            case ' ':
            case '\r':
            case '\n':
                ++ts;
                break;
            case '<':
                if (!__Pyx_IsLittleEndian()) {
                    PyErr_SetString(PyExc_ValueError,
                                    "Little-endian buffer not supported on big-endian compiler");
                    return NULL;
                }
                ctx->new_packmode = '=';
                ++ts;
                break;
            case '>':
            case '!':
                if (__Pyx_IsLittleEndian()) {
                    PyErr_SetString(PyExc_ValueError,
                                    "Big-endian buffer not supported on little-endian
compiler");
                    return NULL;
                }
                ctx->new_packmode = '=';
                ++ts;
                break;
            case '=':
            case '@':
            case '^':
                ctx->new_packmode = *ts++;
                break;
            case 'T': /* substruct: recurse over the "{...}" body once per repeat count */
                {
                    const char* ts_after_sub;
                    size_t i, struct_count = ctx->new_count;
                    size_t struct_alignment = ctx->struct_alignment;
                    ctx->new_count = 1;
                    ++ts;
                    if (*ts != '{') {
                        PyErr_SetString(PyExc_ValueError,
                                        "Buffer acquisition: Expected '{' after 'T'");
                        return NULL;
                    }
                    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                    ctx->enc_type = 0;
                    ctx->enc_count = 0;
                    ctx->struct_alignment = 0;
                    ++ts;
                    ts_after_sub = ts;
                    for (i = 0; i != struct_count; ++i) {
                        ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
                        if (!ts_after_sub) return NULL;
                    }
                    ts = ts_after_sub;
                    if (struct_alignment) ctx->struct_alignment = struct_alignment;
                }
                break;
            case '}': /* end of substruct; pad to the struct's alignment and return */
                {
                    size_t alignment = ctx->struct_alignment;
                    ++ts;
                    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                    ctx->enc_type = 0;
                    if (alignment && ctx->fmt_offset % alignment) {
                        ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
                    }
                }
                return ts;
            case 'x': /* pad bytes: just advance the offset */
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->fmt_offset += ctx->new_count;
                ctx->new_count = 1;
                ctx->enc_count = 0;
                ctx->enc_type = 0;
                ctx->enc_packmode = ctx->new_packmode;
                ++ts;
                break;
            case 'Z': /* complex prefix; falls through to the type-code cases */
                got_Z = 1;
                ++ts;
                if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
                    __Pyx_BufFmt_RaiseUnexpectedChar('Z');
                    return NULL;
                }
            case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
            case 'l': case 'L': case 'q': case 'Q':
            case 'f': case 'd': case 'g':
            case 'O': case 'p':
                /* Same type as the pending chunk: merge the repeat counts;
                   otherwise fall through to start a new chunk. */
                if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
                    ctx->enc_packmode == ctx->new_packmode) {
                    ctx->enc_count += ctx->new_count;
                    ctx->new_count = 1;
                    got_Z = 0;
                    ++ts;
                    break;
                }
            case 's':
                /* Start a new pending type chunk after flushing the old one. */
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->enc_count = ctx->new_count;
                ctx->enc_packmode = ctx->new_packmode;
                ctx->enc_type = *ts;
                ctx->is_complex = got_Z;
                ++ts;
                ctx->new_count = 1;
                got_Z = 0;
                break;
            case ':': /* ":name:" field annotation — skipped */
                ++ts;
                while(*ts != ':') ++ts;
                ++ts;
                break;
            case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
                break;
            default:
                {
                    /* Anything else must be a repeat count for the next code. */
                    int number = __Pyx_BufFmt_ExpectNumber(&ts);
                    if (number == -1) return NULL;
                    ctx->new_count = (size_t)number;
                }
        }
    }
}

/* Reset a Py_buffer to an empty, safely-releasable state. */
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
    buf->buf = NULL;
    buf->obj = NULL;
    buf->strides = __Pyx_zeros;
    buf->shape = __Pyx_zeros;
    buf->suboffsets = __Pyx_minusones;
}

/* Acquire a buffer from `obj` into `buf` and validate its dimensionality,
   format string (unless `cast`), and item size against `dtype`.
   Returns 0 on success; -1 with an exception set and `buf` zeroed on
   failure.  None/NULL objects yield an empty buffer and success. */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
        Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
        int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
    if (obj == Py_None || obj == NULL) {
        __Pyx_ZeroBuffer(buf);
        return 0;
    }
    buf->buf = NULL;
    if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
    if (buf->ndim != nd) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer has wrong number of dimensions (expected %d, got %d)",
                     nd, buf->ndim);
        goto fail;
    }
    if (!cast) {
        __Pyx_BufFmt_Context ctx;
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
    }
    if ((unsigned)buf->itemsize != dtype->size) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T
                     "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
                     buf->itemsize, (buf->itemsize > 1) ? "s" : "",
                     dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ?
"s" : "");
        goto fail;
    }
    if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
    return 0;
fail:;
    __Pyx_ZeroBuffer(buf);
    return -1;
}

/* Release a buffer acquired by __Pyx_GetBufferAndValidate; a NULL buf
   pointer (empty buffer) is a no-op. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
    if (info->buf == NULL) return;
    if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
    __Pyx_ReleaseBuffer(info);
}

/* Populate `memviewslice` (shape/strides/suboffsets/data) from the
   Py_buffer held by `memview`.  Synthesises C-contiguous strides when the
   buffer reports none, and takes a reference on `memview` for the first
   acquisition unless it is a brand-new reference.
   Returns 0 on success, -1 with ValueError set on failure. */
static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
                                   int ndim,
                                   __Pyx_memviewslice *memviewslice,
                                   int memview_is_new_reference)
{
    __Pyx_RefNannyDeclarations
    int i, retval=-1;
    Py_buffer *buf = &memview->view;
    __Pyx_RefNannySetupContext("init_memviewslice", 0);
    if (!buf) {
        PyErr_SetString(PyExc_ValueError, "buf is NULL.");
        goto fail;
    } else if (memviewslice->memview || memviewslice->data) {
        PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!");
        goto fail;
    }
    if (buf->strides) {
        for (i = 0; i < ndim; i++) {
            memviewslice->strides[i] = buf->strides[i];
        }
    } else {
        /* No stride info: assume C-contiguous layout. */
        Py_ssize_t stride = buf->itemsize;
        for (i = ndim - 1; i >= 0; i--) {
            memviewslice->strides[i] = stride;
            stride *= buf->shape[i];
        }
    }
    for (i = 0; i < ndim; i++) {
        memviewslice->shape[i] = buf->shape[i];
        if (buf->suboffsets) {
            memviewslice->suboffsets[i] = buf->suboffsets[i];
        } else {
            memviewslice->suboffsets[i] = -1;
        }
    }
    memviewslice->memview = memview;
    memviewslice->data = (char *)buf->buf;
    if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
        Py_INCREF(memview);
    }
    retval = 0;
    goto no_fail;
fail:
    memviewslice->memview = 0;
    memviewslice->data = 0;
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}

/* Abort the process with a printf-style formatted fatal error message. */
static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...)
{ va_list vargs; char msg[200]; va_start(vargs, fmt); #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); Py_FatalError(msg); va_end(vargs); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } 
} else { memslice->memview = NULL; } } static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { 
Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { if (PyObject_IsSubclass(instance_class, type)) { type = instance_class; } else { instance_class = NULL; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if 
(!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(tmp_type, tmp_value, tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif 
return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_COMPILING_IN_CPYTHON #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { length = strlen(cstring); if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); 
#else PyErr_GetExcInfo(type, value, tb); #endif } static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(type, value, tb); #endif } static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { PyObject *local_type, *local_value, *local_tb; #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_COMPILING_IN_CPYTHON tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } static CYTHON_INLINE void 
__Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #else PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (PyErr_ExceptionMatches(PyExc_OverflowError)) PyErr_Clear(); else return NULL; } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int 
pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const 
char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } Py_DECREF(obj); view->obj = NULL; } #endif static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = 
PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void 
*)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 
1; i >- 1; i--) {
        if (stride * buf->itemsize != buf->strides[i] &&
            buf->shape[i] > 1)
        {
            PyErr_SetString(PyExc_ValueError,
                "Buffer not C contiguous.");
            goto fail;
        }
        stride = stride * buf->shape[i];
    }
    }
    return 1;
fail:
    return 0;
}

/* NOTE(review): everything below is Cython-generated module-support code
 * ("__Pyx_*" / "__pyx_*" helpers).  Comments added for readability only;
 * the code itself is byte-identical to what Cython emitted and should be
 * regenerated from the .pyx source rather than hand-edited. */

/* Validate a Py_buffer obtained from `original_obj` against the expected
 * ndim/dtype/axis specs and initialize `memviewslice` from it.
 * Returns 0 on success, -1 on failure (with a Python exception set). */
static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj)
{
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int from_memoryview = __pyx_memoryview_check(original_obj);
    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
    /* Reuse an existing memoryview when its typeinfo already matches;
     * otherwise acquire a fresh buffer via __pyx_memoryview_new. */
    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) {
        memview = (struct __pyx_memoryview_obj *) original_obj;
        new_memview = NULL;
    } else {
        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
            original_obj, buf_flags, 0, dtype);
        new_memview = memview;
        if (unlikely(!memview))
            goto fail;
    }
    buf = &memview->view;
    if (buf->ndim != ndim) {
        PyErr_Format(PyExc_ValueError,
                "Buffer has wrong number of dimensions (expected %d, got %d)",
                ndim, buf->ndim);
        goto fail;
    }
    if (new_memview) {
        /* Only freshly-acquired buffers need their format string checked. */
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
    }
    if ((unsigned) buf->itemsize != dtype->size) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
                     buf->itemsize,
                     (buf->itemsize > 1) ? "s" : "",
                     dtype->name,
                     dtype->size,
                     (dtype->size > 1) ? "s" : "");
        goto fail;
    }
    /* Per-axis stride/suboffset validation against the requested specs. */
    for (i = 0; i < ndim; i++) {
        spec = axes_specs[i];
        if (!__pyx_check_strides(buf, i, ndim, spec))
            goto fail;
        if (!__pyx_check_suboffsets(buf, i, ndim, spec))
            goto fail;
    }
    if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
                                         new_memview != NULL) == -1)) {
        goto fail;
    }
    retval = 0;
    goto no_fail;
fail:
    Py_XDECREF(new_memview);
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}

/* Convert a Python object into a 2-D strided double memoryview slice.
 * Py_None passes through; on error result.memview/result.data are NULL. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = {
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
                                                 PyBUF_RECORDS, 2,
                                                 &__Pyx_TypeInfo_double, stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}

/* Range-check `func_value` against `target_type` and return it; jumps to
 * the caller's raise_overflow / raise_neg_overflow labels on overflow.
 * Relies on a local `is_unsigned` being in scope at the expansion site. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value) \
    { \
        func_type value = func_value; \
        if (sizeof(target_type) < sizeof(func_type)) { \
            if (unlikely(value != (func_type) (target_type) value)) { \
                func_type zero = 0; \
                if (is_unsigned && unlikely(value < zero)) \
                    goto raise_neg_overflow; \
                else \
                    goto raise_overflow; \
            } \
        } \
        return (target_type) value; \
    }

#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif

/* Convert an arbitrary Python object to a C int, raising OverflowError on
 * out-of-range values.  Single-digit PyLongs take a fast path when CPython
 * internals are available; huge values fall back to _PyLong_AsByteArray. */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
    const int neg_one = (int) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            /* Fast path: single-digit positive PyLong. */
            switch (Py_SIZE(x)) {
                case 0: return 0;
                case 1: __PYX_VERIFY_RETURN_INT(int, digit, ((PyLongObject*)x)->ob_digit[0]);
            }
#endif
#endif
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong(x))
            } else if (sizeof(int) <= sizeof(unsigned long long)) {
                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong(x))
            }
        } else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            /* Fast path: single-digit PyLong of either sign. */
            switch (Py_SIZE(x)) {
                case 0: return 0;
                case 1: __PYX_VERIFY_RETURN_INT(int, digit, +(((PyLongObject*)x)->ob_digit[0]));
                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
            }
#endif
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong(x))
            } else if (sizeof(int) <= sizeof(long long)) {
                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong(x))
            }
        }
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            /* Slow path: serialize via _PyLong_AsByteArray in host byte order. */
            int val;
            PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* Not an int/long: coerce via __number__ protocol, then recurse. */
        int val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to int");
    return (int) -1;
}

/* Convert a Python object into a 3-D strided double memoryview slice. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *obj) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = {
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
                                                 PyBUF_RECORDS, 3,
                                                 &__Pyx_TypeInfo_double, stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}

/* Build a Python int from a C int, picking the smallest suitable CPython
 * constructor; falls back to _PyLong_FromByteArray for odd-sized types. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
    const int neg_one = (int) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
        } else if (sizeof(int) <= sizeof(unsigned long long)) {
            return PyLong_FromUnsignedLongLong((unsigned long long) value);
        }
    } else {
        if (sizeof(int) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(long long)) {
            return PyLong_FromLongLong((long long) value);
        }
    }
    {
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(int),
                                     little, !is_unsigned);
    }
}

/* Build a Python int from a C long (same template as __Pyx_PyInt_From_int;
 * the sizeof(long) < sizeof(long) branch is dead by construction but is
 * what the code generator emits for every integer type). */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
    const long neg_one = (long) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
        } else if (sizeof(long) <= sizeof(unsigned long long)) {
            return PyLong_FromUnsignedLongLong((unsigned long long) value);
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(long long)) {
            return PyLong_FromLongLong((long long) value);
        }
    }
    {
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}

/* Memoryview item accessors for dtype `double`:
 * get boxes the C value as a Python float; set unboxes and stores it,
 * returning 1 on success and 0 on conversion error. */
static PyObject *__pyx_memview_get_double(const char *itemp) {
    return (PyObject *) PyFloat_FromDouble(*(double *) itemp);
}
static int __pyx_memview_set_double(const char *itemp, PyObject *obj) {
    double value = __pyx_PyFloat_AsDouble(obj);
    if ((value == (double)-1) && PyErr_Occurred())
        return 0;
    *(double *) itemp = value;
    return 1;
}

/* float-complex constructor: native C99/C++ complex when available,
 * otherwise a plain {real, imag} struct. */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      return ::std::complex< float >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      return x + y*(__pyx_t_float_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      __pyx_t_float_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif

/* Struct-based float-complex arithmetic, compiled only when the compiler
 * has no native complex type (CYTHON_CCOMPLEX is false). */
#if CYTHON_CCOMPLEX
#else
    static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
       return (a.real == b.real) && (a.imag == b.imag);
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        z.real = a.real + b.real;
        z.imag = a.imag + b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        z.real = a.real - b.real;
        z.imag = a.imag - b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        z.real = a.real * b.real - a.imag * b.imag;
        z.imag = a.real * b.imag + a.imag * b.real;
        return z;
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        float denom = b.real * b.real + b.imag * b.imag;
        z.real = (a.real * b.real + a.imag * b.imag) / denom;
        z.imag = (a.imag * b.real - a.real * b.imag) / denom;
        return z;
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
        __pyx_t_float_complex z;
        z.real = -a.real;
        z.imag = -a.imag;
        return z;
    }
    static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
       return (a.real == 0) && (a.imag == 0);
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
        __pyx_t_float_complex z;
        z.real =  a.real;
        z.imag = -a.imag;
        return z;
    }
  #if 1
    static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
      #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
        return sqrtf(z.real*z.real + z.imag*z.imag);
      #else
        return hypotf(z.real, z.imag);
      #endif
    }
    /* a**b: small-integer exponents are unrolled multiplications;
     * the general case uses the polar form exp(b*log(a)). */
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        float r, lnr, theta, z_r, z_theta;
        if (b.imag == 0 && b.real == (int)b.real) {
            if (b.real < 0) {
                float denom = a.real * a.real + a.imag * a.imag;
                a.real = a.real / denom;
                a.imag = -a.imag / denom;
                b.real = -b.real;
            }
            switch ((int)b.real) {
                case 0:
                    z.real = 1;
                    z.imag = 0;
                    return z;
                case 1:
                    return a;
                case 2:
                    z = __Pyx_c_prodf(a, a);
                    return __Pyx_c_prodf(a, a);
                case 3:
                    z = __Pyx_c_prodf(a, a);
                    return __Pyx_c_prodf(z, a);
                case 4:
                    z = __Pyx_c_prodf(a, a);
                    return __Pyx_c_prodf(z, z);
            }
        }
        if (a.imag == 0) {
            if (a.real == 0) {
                return a;
            }
            r = a.real;
            theta = 0;
        } else {
            r = __Pyx_c_absf(a);
            theta = atan2f(a.imag, a.real);
        }
        lnr = logf(r);
        z_r = expf(lnr * b.real - theta * b.imag);
        z_theta = theta * b.real + lnr * b.imag;
        z.real = z_r * cosf(z_theta);
        z.imag = z_r * sinf(z_theta);
        return z;
    }
  #endif
#endif

/* double-complex constructor, mirroring the float version above. */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return ::std::complex< double >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return x + y*(__pyx_t_double_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      __pyx_t_double_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif

/* Struct-based double-complex arithmetic (no-native-complex fallback). */
#if CYTHON_CCOMPLEX
#else
    static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
       return (a.real == b.real) && (a.imag == b.imag);
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        z.real = a.real + b.real;
        z.imag = a.imag + b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        z.real = a.real - b.real;
        z.imag = a.imag - b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        z.real = a.real * b.real - a.imag * b.imag;
        z.imag = a.real * b.imag + a.imag * b.real;
        return z;
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        double denom = b.real * b.real + b.imag * b.imag;
        z.real = (a.real * b.real + a.imag * b.imag) / denom;
        z.imag = (a.imag * b.real - a.real * b.imag) / denom;
        return z;
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
        __pyx_t_double_complex z;
        z.real = -a.real;
        z.imag = -a.imag;
        return z;
    }
    static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
       return (a.real == 0) && (a.imag == 0);
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
        __pyx_t_double_complex z;
        z.real =  a.real;
        z.imag = -a.imag;
        return z;
    }
  #if 1
    static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
      #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
        return sqrt(z.real*z.real + z.imag*z.imag);
      #else
        return hypot(z.real, z.imag);
      #endif
    }
    /* a**b for double complex; same structure as __Pyx_c_powf above. */
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        double r, lnr, theta, z_r, z_theta;
        if (b.imag == 0 && b.real == (int)b.real) {
            if (b.real < 0) {
                double denom = a.real * a.real + a.imag * a.imag;
                a.real = a.real / denom;
                a.imag = -a.imag / denom;
                b.real = -b.real;
            }
            switch ((int)b.real) {
                case 0:
                    z.real = 1;
                    z.imag = 0;
                    return z;
                case 1:
                    return a;
                case 2:
                    z = __Pyx_c_prod(a, a);
                    return __Pyx_c_prod(a, a);
                case 3:
                    z = __Pyx_c_prod(a, a);
                    return __Pyx_c_prod(z, a);
                case 4:
                    z = __Pyx_c_prod(a, a);
                    return __Pyx_c_prod(z, z);
            }
        }
        if (a.imag == 0) {
            if (a.real == 0) {
                return a;
            }
            r = a.real;
            theta = 0;
        } else {
            r = __Pyx_c_abs(a);
            theta = atan2(a.imag, a.real);
        }
        lnr = log(r);
        z_r = exp(lnr * b.real - theta * b.imag);
        z_theta = theta * b.real + lnr * b.imag;
        z.real = z_r * cos(z_theta);
        z.imag = z_r * sin(z_theta);
        return z;
    }
  #endif
#endif

/* Return 1 if the slice is contiguous in the given order ('F' checks
 * axes first-to-last, anything else last-to-first, i.e. C order). */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs,
                                        char order, int ndim) {
    int i, index, step, start;
    Py_ssize_t itemsize = mvs->memview->view.itemsize;
    if (order == 'F') {
        step = 1;
        start = 0;
    } else {
        step = -1;
        start = ndim - 1;
    }
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        if (mvs->suboffsets[index] >= 0 || mvs->strides[index] != itemsize)
            return 0;
        itemsize *= mvs->shape[index];
    }
    return 1;
}

/* Compute the [start, end) byte extent a slice touches, accounting for
 * negative strides; an empty extent collapses start == end. */
static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                                           void **out_start, void **out_end,
                                           int ndim, size_t itemsize) {
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }
    *out_start = start;
    *out_end = end + itemsize;
}

/* True if the memory extents of the two slices intersect. */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
                                __Pyx_memviewslice *slice2,
                                int ndim, size_t itemsize) {
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    return (start1 < end2) && (start2 < end1);
}

/* Allocate a new contiguous array (in `mode` order) and copy `from_mvs`
 * into it; indirect (suboffset) dimensions are rejected.  On failure the
 * returned slice has NULL memview/data and an exception is set. */
static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                                           const char *mode, int ndim,
                                                           size_t sizeof_dtype, int contig_flag,
                                                           int dtype_is_object) {
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;
    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
    for (i = 0; i < ndim; i++) {
        if (from_mvs->suboffsets[i] >= 0) {
            PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
                                           "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }
    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);
    for(i = 0; i < ndim; i++) {
        temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
        if(unlikely(!temp_int)) {
            goto fail;
        } else {
            /* PyTuple_SET_ITEM steals the reference to temp_int. */
            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
            temp_int = NULL;
        }
    }
    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
    if (unlikely(!array_obj)) {
        goto fail;
    }
    __Pyx_GOTREF(array_obj);
    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                (PyObject *) array_obj, contig_flag,
                                dtype_is_object,
                                from_mvs->memview->typeinfo);
    if (unlikely(!memview_obj))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
        goto fail;
    if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
                                                dtype_is_object) < 0))
        goto fail;
    goto no_fail;
fail:
    __Pyx_XDECREF(new_mvs.memview);
    new_mvs.memview = NULL;
    new_mvs.data = NULL;
no_fail:
    __Pyx_XDECREF(shape_tuple);
    __Pyx_XDECREF(temp_int);
    __Pyx_XDECREF(array_obj);
    __Pyx_RefNannyFinishContext();
    return new_mvs;
}

/* Wrap a raw pointer in a PyCapsule (PyCObject on pre-2.7 CPython). */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) {
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}

/* Convert a Python object to a C char with overflow checking (same
 * generated template as __Pyx_PyInt_As_int above). */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
    const char neg_one = (char) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(char) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (char) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            switch (Py_SIZE(x)) {
                case 0: return 0;
                case 1: __PYX_VERIFY_RETURN_INT(char, digit, ((PyLongObject*)x)->ob_digit[0]);
            }
#endif
#endif
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
            if (sizeof(char) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT(char, unsigned long, PyLong_AsUnsignedLong(x))
            } else if (sizeof(char) <= sizeof(unsigned long long)) {
                __PYX_VERIFY_RETURN_INT(char, unsigned long long, PyLong_AsUnsignedLongLong(x))
            }
        } else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            switch (Py_SIZE(x)) {
                case 0: return 0;
                case 1: __PYX_VERIFY_RETURN_INT(char, digit, +(((PyLongObject*)x)->ob_digit[0]));
                case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
            }
#endif
#endif
            if (sizeof(char) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT(char, long, PyLong_AsLong(x))
            } else if (sizeof(char) <= sizeof(long long)) {
                __PYX_VERIFY_RETURN_INT(char, long long, PyLong_AsLongLong(x))
            }
        }
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            char val;
            PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (char) -1;
        }
    } else {
        char val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (char) -1;
        val = __Pyx_PyInt_As_char(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to char");
    return (char) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to char");
    return (char) -1;
}

/* Convert a Python object to a C long with overflow checking (same
 * generated template as __Pyx_PyInt_As_int above). */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
    const long neg_one = (long) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(long) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (long) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            switch (Py_SIZE(x)) {
                case 0: return 0;
                case 1: __PYX_VERIFY_RETURN_INT(long, digit, ((PyLongObject*)x)->ob_digit[0]);
            }
#endif
#endif
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
            if (sizeof(long) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong(x))
            } else if (sizeof(long) <= sizeof(unsigned long long)) {
                __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong(x))
            }
        } else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            switch (Py_SIZE(x)) {
                case 0: return 0;
                case 1: __PYX_VERIFY_RETURN_INT(long, digit, +(((PyLongObject*)x)->ob_digit[0]));
                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
            }
#endif
#endif
            if (sizeof(long) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong(x))
            } else if (sizeof(long) <= sizeof(long long)) {
                __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong(x))
            }
        }
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            long val;
            PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (long) -1;
        }
    } else {
        long val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyInt_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to long");
    return (long) -1;
}
/* Convert a Python object into a 3-D C-contiguous writable double
 * memoryview slice (last axis contiguous). */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(PyObject *obj) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = {
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
                                                 (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 3,
                                                 &__Pyx_TypeInfo_double, stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}

/* Same as above but for a 2-D C-contiguous writable double slice. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = {
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW),
        (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
                                                 (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 2,
                                                 &__Pyx_TypeInfo_double, stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}

/* Warn when the Python major.minor version at runtime differs from the
 * one the module was compiled against. */
static int __Pyx_check_binary_version(void) {
    char ctversion[4], rtversion[4];
    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
        char message[200];
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        return PyErr_WarnEx(NULL, message, 1);
    }
    return 0;
}

/* Import a module by name; returns a new reference or NULL. */
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
static PyObject *__Pyx_ImportModule(const char *name) {
    PyObject *py_name = 0;
    PyObject *py_module = 0;
    py_name = __Pyx_PyIdentifier_FromString(name);
    if (!py_name)
        goto bad;
    py_module = PyImport_Import(py_name);
    Py_DECREF(py_name);
    return py_module;
bad:
    Py_XDECREF(py_name);
    return 0;
}
#endif

/* Import an extension type and verify its tp_basicsize matches `size`;
 * a larger size only warns when `strict` is 0, a mismatch always fails. */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
    size_t size, int strict)
{
    PyObject *py_module = 0;
    PyObject *result = 0;
    PyObject *py_name = 0;
    char warning[200];
    Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
    PyObject *py_basicsize;
#endif
    py_module = __Pyx_ImportModule(module_name);
    if (!py_module)
        goto bad;
    py_name = __Pyx_PyIdentifier_FromString(class_name);
    if (!py_name)
        goto bad;
    result = PyObject_GetAttr(py_module, py_name);
    Py_DECREF(py_name);
    py_name = 0;
    Py_DECREF(py_module);
    py_module = 0;
    if (!result)
        goto bad;
    if (!PyType_Check(result)) {
        PyErr_Format(PyExc_TypeError,
            "%.200s.%.200s is not a type object",
            module_name, class_name);
        goto bad;
    }
#ifndef Py_LIMITED_API
    basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
    py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
    if (!py_basicsize)
        goto bad;
    basicsize = PyLong_AsSsize_t(py_basicsize);
    Py_DECREF(py_basicsize);
    py_basicsize = 0;
    if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
        goto bad;
#endif
    if (!strict && (size_t)basicsize > size) {
        PyOS_snprintf(warning, sizeof(warning),
            "%s.%s size changed, may indicate binary incompatibility",
            module_name, class_name);
        if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
    }
    else if ((size_t)basicsize != size) {
        PyErr_Format(PyExc_ValueError,
            "%.200s.%.200s has the wrong size, try recompiling",
            module_name, class_name);
        goto bad;
    }
    return (PyTypeObject *)result;
bad:
    Py_XDECREF(py_module);
    Py_XDECREF(result);
    return NULL;
}
#endif

/* Fetch a C function pointer exported through another Cython module's
 * __pyx_capi__ dict, verifying its capsule signature string. */
#ifndef __PYX_HAVE_RT_ImportFunction
#define __PYX_HAVE_RT_ImportFunction
static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
    PyObject *d = 0;
    PyObject *cobj = 0;
    union {
        void (*fp)(void);
        void *p;
    } tmp;
    d = PyObject_GetAttrString(module, (char *)"__pyx_capi__");
    if (!d)
        goto bad;
    cobj = PyDict_GetItemString(d, funcname);
    if (!cobj) {
        PyErr_Format(PyExc_ImportError,
            "%.200s does not export expected C function %.200s",
                PyModule_GetName(module), funcname);
        goto bad;
    }
#if PY_VERSION_HEX >= 0x02070000
    if (!PyCapsule_IsValid(cobj, sig)) {
        PyErr_Format(PyExc_TypeError,
            "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
             PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj));
        goto bad;
    }
    tmp.p = PyCapsule_GetPointer(cobj, sig);
#else
    {const char *desc, *s1, *s2;
    desc = (const char *)PyCObject_GetDesc(cobj);
    if (!desc)
        goto bad;
    s1 = desc; s2 = sig;
    while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; }
    if (*s1 != *s2) {
        PyErr_Format(PyExc_TypeError,
            "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
             PyModule_GetName(module), funcname, sig, desc);
        goto bad;
    }
    tmp.p = PyCObject_AsVoidPtr(cobj);}
#endif
    *f = tmp.fp;
    if (!(*f))
        goto bad;
    Py_DECREF(d);
    return 0;
bad:
    Py_XDECREF(d);
    return -1;
}
#endif

/* Intern/decode the module's static string table; stops at the NULL
 * sentinel entry, returns -1 on the first failed conversion. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
    while (t->p) {
#if PY_MAJOR_VERSION < 3
        if (t->is_unicode) {
            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
        } else if (t->intern) {
            *t->p = PyString_InternFromString(t->s);
        } else {
            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
        }
#else
        if (t->is_unicode | t->is_str) {
            if (t->intern) {
                *t->p = PyUnicode_InternFromString(t->s);
            } else if (t->encoding) {
                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
            } else {
                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
            }
        } else {
            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
        }
#endif
        if (!*t->p)
            return -1;
        ++t;
    }
    return 0;
}

/* NUL-terminated C string -> Python unicode. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}

/* Python object -> char* (length discarded). */
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t ignore;
    return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}

/* Python object -> (char*, length).  For unicode under the default-ASCII
 * encoding build, non-ASCII content raises via PyUnicode_AsASCIIString;
 * bytearray and bytes are returned directly.  The returned pointer borrows
 * the object's internal buffer. */
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
    if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
            __Pyx_sys_getdefaultencoding_not_ascii &&
#endif
            PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
        char* defenc_c;
        PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
        if (!defenc) return NULL;
        defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        {
            /* Reject any byte >= 128 to enforce pure ASCII. */
            char* end = defenc_c + PyBytes_GET_SIZE(defenc);
            char* c;
            for (c = defenc_c; c < end; c++) {
                if ((unsigned char) (*c) >= 128) {
                    PyUnicode_AsASCIIString(o);
                    return NULL;
                }
            }
        }
#endif
        *length = PyBytes_GET_SIZE(defenc);
        return defenc_c;
#else
        if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        if (PyUnicode_IS_ASCII(o)) {
            *length = PyUnicode_GET_LENGTH(o);
            return PyUnicode_AsUTF8(o);
        } else {
            PyUnicode_AsASCIIString(o);
            return NULL;
        }
#else
        return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
    } else
#endif
#if !CYTHON_COMPILING_IN_PYPY
    if (PyByteArray_Check(o)) {
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
    } else
#endif
    {
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}

/* Fast truthiness: True/False/None answered without a call. */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    int is_true = x == Py_True;
    if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
    else return PyObject_IsTrue(x);
}

/* Coerce an object to a Python int via its number protocol, with a
 * TypeError if __int__/__long__ returns a non-integer. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
    PyNumberMethods *m;
    const char *name = NULL;
    PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
    if (PyInt_Check(x) || PyLong_Check(x))
#else
    if (PyLong_Check(x))
#endif
        return Py_INCREF(x), x;
    m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Int(x);
    }
    else if (m && m->nb_long) {
        name = "long";
        res = PyNumber_Long(x);
    }
#else
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Long(x);
    }
#endif
    if (res) {
#if PY_MAJOR_VERSION < 3
        if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
        if (!PyLong_Check(res)) {
#endif
            PyErr_Format(PyExc_TypeError,
                         "__%.4s__ returned non-%.4s (type %.200s)",
                         name, name, Py_TYPE(res)->tp_name);
            Py_DECREF(res);
            return NULL;
        }
    }
    else if (!PyErr_Occurred()) {
        PyErr_SetString(PyExc_TypeError,
                        "an integer is required");
    }
    return res;
}

/* Index-protocol conversion to Py_ssize_t, with a single-digit PyLong
 * fast path when CPython internals are available. */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
    Py_ssize_t ival;
    PyObject *x;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(b)))
        return PyInt_AS_LONG(b);
#endif
    if (likely(PyLong_CheckExact(b))) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
        switch (Py_SIZE(b)) {
            case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0];
            case 0: return 0;
            case 1: return ((PyLongObject*)b)->ob_digit[0];
        }
#endif
#endif
        return PyLong_AsSsize_t(b);
    }
    x = PyNumber_Index(b);
    if (!x) return -1;
    ival = PyInt_AsSsize_t(x);
    Py_DECREF(x);
    return ival;
}

/* size_t -> Python int. */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
    return PyInt_FromSize_t(ival);
}

#endif /* Py_PYTHON_H */
rar_fmt_plug.c
/* RAR 3.x cracker patch for JtR. Hacked together during
 * April of 2011 by Dhiru Kholia <dhiru.kholia at gmail.com> for GSoC.
 * magnum added -p mode support, using code based on libclamav
 * and OMP, AES-NI and OpenCL support.
 * jimf added dyna_salt support, Oct 2014.
 *
 * This software is Copyright (c) 2011, Dhiru Kholia <dhiru.kholia at gmail.com>
 * and Copyright (c) 2012, magnum and it is hereby released to the general public
 * under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * This code is based on the work of Alexander L. Roshal (C)
 *
 * The unRAR sources may be used in any software to handle RAR
 * archives without limitations free of charge, but cannot be used
 * to re-create the RAR compression algorithm, which is proprietary.
 * Distribution of modified unRAR sources in separate form or as a
 * part of other software is permitted, provided that it is clearly
 * stated in the documentation and source comments that the code may
 * not be used to develop a RAR (WinRAR) compatible archiver.
 *
 * Huge thanks to Marc Bevand <m.bevand (at) gmail.com> for releasing unrarhp
 * (http://www.zorinaq.com/unrarhp/) and documenting the RAR encryption scheme.
 * This patch is made possible by unrarhp's documentation.
 *
 * http://anrieff.net/ucbench/technical_qna.html is another useful reference
 * for RAR encryption scheme.
 *
 * Thanks also to Pavel Semjanov for crucial help with Huffman table checks.
 *
 * For type = 0 for files encrypted with "rar -hp ..." option
 * archive_name:$RAR3$*type*hex(salt)*hex(partial-file-contents):type::::archive_name
 *
 * For type = 1 for files encrypted with "rar -p ..." option
 * archive_name:$RAR3$*type*hex(salt)*hex(crc)*PACK_SIZE*UNP_SIZE*archive_name*offset-for-ciphertext*method:type::file_name
 *
 * or (inlined binary)
 *
 * archive_name:$RAR3$*type*hex(salt)*hex(crc)*PACK_SIZE*UNP_SIZE*1*hex(full encrypted file)*method:type::file_name
 *
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rar;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rar);
#else

#include <string.h>
#include <errno.h>

#if AC_BUILT
#include "autoconfig.h"
#endif

#if _MSC_VER || __MINGW32__ || __MINGW64__ || __CYGWIN__ || HAVE_WINDOWS_H
#include "win32_memmap.h"
#if !defined(__CYGWIN__) && !defined(__MINGW64__)
#include "mmap-windows.c"
#elif defined HAVE_MMAP
#include <sys/mman.h>
#endif
#elif defined(HAVE_MMAP)
#include <sys/mman.h>
#endif

#include "arch.h"
#include "sha.h"
#include "crc32.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "dyna_salt.h"
#include "memory.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#include "johnswap.h"
#include "unrar.h"
#include "config.h"
#include "jumbo.h"

#define FORMAT_LABEL		"rar"
#define FORMAT_NAME		"RAR3"

#ifdef DEBUG
#define BENCHMARK_COMMENT	" (1-16 characters)"
#else
#define BENCHMARK_COMMENT	" (4 characters)"
#endif
#define BENCHMARK_LENGTH	-1

/* Passwords are processed as UTF-16LE, hence two bytes per character */
#define UNICODE_LENGTH		(2 * PLAINTEXT_LENGTH)
#define BINARY_SIZE		0
#define BINARY_ALIGN		MEM_ALIGN_NONE
#define SALT_SIZE		sizeof(rarfile*)
#define SALT_ALIGN		sizeof(rarfile*)

#ifdef SIMD_COEF_32
#include "simd-intrinsics.h"
/* Number of candidates hashed per SIMD call (lanes * parallel SHA-1 blocks) */
#define NBKEYS	(SIMD_COEF_32*SIMD_PARA_SHA1)
/* Byte offset of logical byte i of lane idx inside the interleaved SHA-1 input buffer */
#define GETPOS(i,idx) ( (idx&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)idx/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 )
/* Index of word 0 of lane idx's 5-word digest inside the interleaved output buffer */
#define HASH_IDX(idx) (((unsigned int)idx&(SIMD_COEF_32-1))+(unsigned int)idx/SIMD_COEF_32*5*SIMD_COEF_32)

#define ALGORITHM_NAME		"SHA1 " SHA1_ALGORITHM_NAME " AES"
#define PLAINTEXT_LENGTH	26
#define MIN_KEYS_PER_CRYPT	NBKEYS
#define MAX_KEYS_PER_CRYPT	NBKEYS
#else
#define ALGORITHM_NAME		"SHA1 AES 32/" ARCH_BITS_STR
#define PLAINTEXT_LENGTH	125
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	1
#endif

/* RAR3 KDF iteration count: 2^18 SHA-1 update rounds per candidate */
#define ROUNDS			0x40000

#ifdef _MSC_VER
#undef _OPENMP
#endif

#ifdef _OPENMP
#include <omp.h>
#endif

#include "rar_common.c"
#include "memdbg.h"

// these are supposed to be stack arrays; however gcc cannot correctly align
// stack arrays so we have to use global arrays; we may switch back to stack
// arrays (which take less space) when gcc fixes this issue
#ifdef SIMD_COEF_32
static uint8_t  (*vec_in)[2][NBKEYS*64];   // double-buffered interleaved SHA-1 input blocks
static uint32_t (*vec_out)[NBKEYS*5];      // running interleaved SHA-1 state per batch
static uint8_t  (*tmp_in)[NBKEYS*64];      // scratch block for the 16 intermediate finalizations
static uint32_t (*tmp_out)[NBKEYS*5];      // scratch digests used to harvest AES IV bytes
#endif

/*
 * Allocate all per-candidate buffers.  max_keys_per_crypt is deliberately
 * inflated by PLAINTEXT_LENGTH because crypt_all() buckets candidates by
 * length and needs mostly-full buckets to keep the SIMD lanes busy.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	self->params.max_keys_per_crypt *= omp_t;
#endif /* _OPENMP */

	// Length is a cost. We sort in buckets but we need them to be mostly full
	self->params.max_keys_per_crypt *= PLAINTEXT_LENGTH;

	if (options.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);

	unpack_data = mem_calloc(omp_t, sizeof(unpack_data_t));
	cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked));
	// allocate 1 more slot to handle the tail of vector buffer
	saved_key = mem_calloc(self->params.max_keys_per_crypt + 1, UNICODE_LENGTH);
	saved_len = mem_calloc(self->params.max_keys_per_crypt + 1, sizeof(*saved_len));
	if (!saved_salt)
		saved_salt = mem_calloc(8, 1);
	aes_key = mem_calloc(self->params.max_keys_per_crypt + 1, 16);
	aes_iv = mem_calloc(self->params.max_keys_per_crypt + 1, 16);

#ifdef SIMD_COEF_32
	vec_in  = mem_calloc_align(self->params.max_keys_per_crypt,
	                           sizeof(*vec_in), MEM_ALIGN_CACHE);
	vec_out = mem_calloc_align(self->params.max_keys_per_crypt,
	                           sizeof(*vec_out), MEM_ALIGN_CACHE);
	tmp_in  = mem_calloc_align(self->params.max_keys_per_crypt,
	                           sizeof(*tmp_in), MEM_ALIGN_CACHE);
	tmp_out = mem_calloc_align(self->params.max_keys_per_crypt,
	                           sizeof(*tmp_out), MEM_ALIGN_CACHE);
#endif

#ifdef DEBUG
	self->params.benchmark_comment = " (1-16 characters)";
#endif

	/* CRC-32 table init, do it before we start multithreading */
	{
		CRC32_t crc;
		CRC32_Init(&crc);
	}
}

/* Release everything init() allocated (reverse order of allocation). */
static void done(void)
{
	MEM_FREE(aes_iv);
	MEM_FREE(aes_key);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
	MEM_FREE(unpack_data);
	MEM_FREE(saved_salt);
#ifdef SIMD_COEF_32
	MEM_FREE(vec_in);
	MEM_FREE(vec_out);
	MEM_FREE(tmp_in);
	MEM_FREE(tmp_out);
#endif
}

/*
 * RAR3 KDF for every candidate:
 *   key material = SHA-1 of ROUNDS repetitions of (UTF-16LE password || 8-byte
 *   salt || 3-byte little-endian round counter); the AES key is the byteswapped
 *   first 16 digest bytes and each of the 16 AES IV bytes is the last digest
 *   byte of an intermediate SHA-1 finalization taken every ROUNDS/16 rounds.
 * The SIMD path batches NBKEYS same-length candidates per call and streams the
 * growing message through two alternating 64-byte interleaved blocks.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef SIMD_COEF_32
	int len;
	int *indices;
	int tot_todo = 0;

	/* Tricky formula, see GitHub #1692 :-) */
	indices = mem_calloc(count + MIN(PLAINTEXT_LENGTH + 1, count) *
	                     (NBKEYS - 1), sizeof(*indices));

	// sort passwords by length; each bucket is padded to a multiple of
	// NBKEYS with the dummy index 'count' (the extra slot allocated in init)
	for (len = 0; len <= PLAINTEXT_LENGTH*2; len += 2) {
		for (index = 0; index < count; ++index) {
			if (saved_len[index] == len)
				indices[tot_todo++] = index;
		}
		while (tot_todo % NBKEYS)
			indices[tot_todo++] = count;
	}

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < tot_todo; index += NBKEYS) {
		unsigned int i, j, k;
		uint8_t (*RawPsw)[NBKEYS*64] = vec_in[index/NBKEYS];
		uint32_t *digest = vec_out[index/NBKEYS];
		// all passwords in one batch have the same length
		int pw_len = saved_len[indices[index]];
		int RawLength = pw_len + 8 + 3;
		int cur_len = 0;      // total bytes fed to SHA-1 so far
		int fst_blk = 1;      // first SIMDSHA1body call has no state to reload
		int cur_buf = 0;      // which of the two 64-byte buffers is being filled
		unsigned char tmp1 = 0, tmp2 = 0;

		for (i = 0; i < ROUNDS; ++i) {
			// copy passwords to vector buffer
			for (j = 0; j < NBKEYS; ++j) {
				int idx = indices[index + j];
				int len = cur_len;

				for (k = 0; k < pw_len; ++k) {
					RawPsw[(len & 64)>>6][GETPOS(len%64, j)] =
						saved_key[UNICODE_LENGTH*idx + k];
					len++;
				}
				for (k = 0; k < 8; ++k) {
					RawPsw[(len & 64)>>6][GETPOS(len%64, j)] =
						saved_salt[k];
					len++;
				}
				// 3-byte little-endian round counter; the two high
				// bytes only change when the low byte wraps to 0
				RawPsw[(len & 64)>>6][GETPOS(len%64, j)] =
					(unsigned char)i;
				len++;
				if ( ((unsigned char) i) == 0) {
					tmp1 = (unsigned char)(i >> 8);
					tmp2 = (unsigned char)(i >> 16);
				}
				RawPsw[(len & 64)>>6][GETPOS(len%64, j)] = tmp1;
				len++;
				RawPsw[(len & 64)>>6][GETPOS(len%64, j)] = tmp2;
			}
			cur_len += RawLength;

			// every ROUNDS/16 rounds: finalize a copy of the running
			// hash and keep its last byte as the next AES IV byte
			if (i % (ROUNDS / 16) == 0) {
				uint8_t *tempin = tmp_in[index/NBKEYS];
				uint32_t *tempout = tmp_out[index/NBKEYS];

				memcpy(tempin, RawPsw[cur_buf], NBKEYS*64);
				for (j = 0; j < NBKEYS; ++j) {
					// padding
					uint32_t *tail;

					for (k = RawLength; k < 64; ++k)
						tempin[GETPOS(k, j)] = 0;
					tempin[GETPOS(RawLength, j)] = 0x80;
					tail = (uint32_t*)&tempin[GETPOS(64 - 1, j)];
					*tail = cur_len*8;
				}
				if (i == 0)
					SIMDSHA1body(tempin, tempout, NULL, SSEi_MIXED_IN);
				else
					SIMDSHA1body(tempin, tempout, digest,
					             SSEi_MIXED_IN | SSEi_RELOAD);
				for (j = 0; j < NBKEYS; ++j) {
					int idx = indices[index + j];
					aes_iv[idx*16 + i/(ROUNDS/16)] =
						(uint8_t)tempout[HASH_IDX(j) + 4*SIMD_COEF_32];
				}
			}

			// swap out and compute digests on the filled buffer
			if ((cur_len & 64) != (cur_buf << 6)) {
				if (fst_blk)
					SIMDSHA1body(RawPsw[cur_buf], digest, NULL,
					             SSEi_MIXED_IN);
				else
					SIMDSHA1body(RawPsw[cur_buf], digest, digest,
					             SSEi_MIXED_IN | SSEi_RELOAD);
				fst_blk = 0;
				cur_buf = 1 - cur_buf;
			}
		}

		// padding
		memset(RawPsw[0], 0, sizeof(RawPsw[0]));
		for (j = 0; j < NBKEYS; ++j) {
			uint32_t *tail;

			RawPsw[0][GETPOS(0, j)] = 0x80;
			tail = (uint32_t*)&RawPsw[0][GETPOS(64 - 1, j)];
			*tail = cur_len*8;
		}
		SIMDSHA1body(RawPsw[0], digest, digest, SSEi_MIXED_IN | SSEi_RELOAD);

		// first 16 digest bytes become the AES key for each candidate
		for (j = 0; j < NBKEYS; ++j) {
			for (i = 0; i < 4; ++i) {
				int idx = indices[index + j];
				uint32_t *dst = (uint32_t*)&aes_key[idx*16];

				dst[i] = digest[HASH_IDX(j) + i*SIMD_COEF_32];
			}
		}
	}
	MEM_FREE(indices);
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		int i16 = index*16;
		unsigned int i;
		unsigned char RawPsw[UNICODE_LENGTH + 8 + 3];
		int RawLength;
		SHA_CTX ctx, tempctx;
		unsigned int digest[5];
		unsigned char *PswNum, tempout[20];

		RawLength = saved_len[index] + 8 + 3;
		PswNum = (unsigned char*) &RawPsw[saved_len[index] + 8];
		PswNum[1] = PswNum[2] = 0;

		/* derive IV and key for AES from saved_key and saved_salt,
		   this code block is based on unrarhp's and unrar's sources */
		memcpy(RawPsw, &saved_key[UNICODE_LENGTH * index], saved_len[index]);
		memcpy(RawPsw + saved_len[index], saved_salt, 8);
		SHA1_Init(&ctx);
		for (i = 0; i < ROUNDS; i++) {
			PswNum[0] = (unsigned char) i;
			if ( ((unsigned char) i) == 0) {
				PswNum[1] = (unsigned char) (i >> 8);
				PswNum[2] = (unsigned char) (i >> 16);
			}
			SHA1_Update(&ctx, RawPsw, RawLength);
			if (i % (ROUNDS / 16) == 0) {
				// intermediate finalization: last digest byte is IV byte
				tempctx = ctx;
				SHA1_Final(tempout, &tempctx);
				aes_iv[i16 + i / (ROUNDS / 16)] = tempout[19];
			}
		}
		SHA1_Final((unsigned char*)digest, &ctx);
		for (i = 0; i < 4; i++)	/* reverse byte order */
			digest[i] = JOHNSWAP(digest[i]);
		memcpy(&aes_key[i16], (unsigned char*)digest, 16);
	}
#endif
	check_rar(count);
	return count;
}

struct fmt_main fmt_rar = {
{
	FORMAT_LABEL,
	FORMAT_NAME,
	ALGORITHM_NAME,
	BENCHMARK_COMMENT,
	BENCHMARK_LENGTH,
	0,
	PLAINTEXT_LENGTH,
	BINARY_SIZE,
	BINARY_ALIGN,
	SALT_SIZE,
	SALT_ALIGN,
	MIN_KEYS_PER_CRYPT,
	MAX_KEYS_PER_CRYPT,
	FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
	{ NULL },
	{ FORMAT_TAG },
	cpu_tests
},{
	init,
	done,
	fmt_default_reset,
	fmt_default_prepare,
	valid,
	fmt_default_split,
	fmt_default_binary,
	get_salt,
	{ NULL },
	fmt_default_source,
	{ fmt_default_binary_hash },
	fmt_default_dyna_salt_hash,
	NULL,
	set_salt,
	set_key,
	get_key,
	fmt_default_clear_keys,
	crypt_all,
	{ fmt_default_get_hash },
	cmp_all,
	cmp_one,
	cmp_exact
}
};

#endif /* plugin stanza */
DRB068-restrictpointer2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* The restrict type qualifier is an indication to the compiler that, if the memory addressed by the restrict -qualified pointer is modified, no other pointer will access that same memory. If a particular chunk of memory is not modified, it can be aliased through more than one restricted pointer. A C99 restrict feature. For gcc, you must use -std=c99 to compile this program. */ #include <stdlib.h> #include <stdio.h> void init(int n, int * restrict a, int * restrict b, int * restrict c) { int i; #pragma omp parallel for for (i = 0; i < n; i++) { a[i] = 1; b[i] = i; c[i] = i * i; } } void foo(int n, int * restrict a, int * restrict b, int * restrict c) { int i; #pragma omp parallel for for (i = 0; i < n; i++) a[i] = b[i] + c[i]; } void print(int n, int * restrict a, int * restrict b, int * restrict c) { int i; for (i = 0; i < n; i++) { printf("%d %d %d\n", a[i], b[i], c[i]); } } int main() { int n = 1000; int * a , *b, *c; a = (int*) malloc (n* sizeof (int)); if (a ==0) { fprintf (stderr, "skip the execution due to malloc failures.\n"); return 1; } b = (int*) malloc (n* sizeof (int)); if (b ==0) { fprintf (stderr, "skip the execution due to malloc failures.\n"); return 1; } c = (int*) malloc (n* sizeof (int)); if (c ==0) { fprintf (stderr, "skip the execution due to malloc failures.\n"); return 1; } init (n, a, b,c); foo (n, a, b,c); print (n, a, b,c); free (a); free (b); free (c); return 0; }
GB_binop__le_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// It is a type/operator specialization (LE on uint32) of the Generator/
// template; the actual loop bodies live in the #include'd *_template.c files.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__le_uint32
// A.*B function (eWiseMult):       GB_AemultB__le_uint32
// A*D function (colscale):         GB_AxD__le_uint32
// D*A function (rowscale):         GB_DxB__le_uint32
// C+=B function (dense accum):     GB_Cdense_accumB__le_uint32
// C+=b function (dense accum):     GB_Cdense_accumb__le_uint32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__le_uint32
// C=scalar+B                       GB_bind1st__le_uint32
// C=scalar+B'                      GB_bind1st_tran__le_uint32
// C=A+scalar                       GB_bind2nd__le_uint32
// C=A'+scalar                      GB_bind2nd_tran__le_uint32

// C type:   bool
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x <= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// "(none)" here: no CBLAS acceleration exists for LE/uint32
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out controlled by GB_control.h)
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_UINT32 || GxB_NO_LE_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// compiled out: LE is not one of the accumulable operators listed below
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__le_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__le_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body compiled out for this operator; succeeds as a no-op
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__le_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body compiled out for this operator; succeeds as a no-op
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__le_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__le_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL             \
{                               \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__le_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces; allocated (if needed) inside the template,
    // always released by GB_FREE_ALL below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__le_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__le_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GBB skips entries absent from the bitmap (if B is bitmap)
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__le_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (x <= aij) ;          \
}

GrB_Info GB_bind1st_tran__le_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (aij <= y) ;          \
}

GrB_Info GB_bind2nd_tran__le_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pst_fmt_plug.c
/* PST cracker patch for JtR. Hacked together during July of 2012 by
 * Dhiru Kholia <dhiru.kholia at gmail.com>
 *
 * Optimizations and shift to pkzip CRC32 code done by JimF
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Uses code from crc32_fmt_plug.c written by JimF
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_pst;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pst);
#else

#include <string.h>

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "crc32.h"

#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif

#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 16384 // core i7 no HT
#endif
#endif
static int omp_t = 1;
#endif

#include "memdbg.h"

#define FORMAT_LABEL		"PST"
#define FORMAT_NAME		"custom CRC-32"
#define FORMAT_TAG		"$pst$"
#define FORMAT_TAG_LEN		(sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME		"32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT	""
#define BENCHMARK_LENGTH	-1
#define PLAINTEXT_LENGTH	8
#define BINARY_SIZE		4
#define SALT_SIZE		0
#define BINARY_ALIGN		sizeof(uint32_t)
#define SALT_ALIGN		1
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	256

static struct fmt_tests tests[] = {
	{"$pst$a9290513", "openwall"}, /* "jfuck jw" works too ;) */
	{"$pst$50e099bc", "password"},
	{"$pst$00000000", ""},
	{"$pst$e3da3318", "xxx"},
	{"$pst$a655dd18", "XYz123"},
	{"$pst$29b14070", "thisisalongstring"},
	{"$pst$25b44615", "string with space"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];   // candidate passwords
static uint32_t (*crypt_out);                     // per-candidate CRC-32 result

/* Allocate per-candidate buffers; scale the batch size for OpenMP. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}

/* Release the buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Accept "$pst$" followed by exactly 8 hex digits and nothing else. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	int extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	p = ciphertext + FORMAT_TAG_LEN;
	if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
		return 0;
	return 1;
}

static void set_key(char *key, int index)
{
	strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}

/* Any candidate whose CRC matches the target counts as a hit. */
static int cmp_all(void *binary, int count)
{
	uint32_t crc=*((uint32_t*)binary), i;

	for (i = 0; i < count; ++i)
		if (crc == crypt_out[i]) return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return *((uint32_t*)binary) == crypt_out[index];
}

/* A CRC-32 match cannot be confirmed any further (hence FMT_NOT_EXACT). */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Compute the PST CRC-32 of every candidate (accumulator starts at 0,
 * fed one byte at a time through jtr_crc32 until the NUL terminator). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i;

#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
	for (i = 0; i < count; ++i) {
		CRC32_t crc = 0;
		unsigned char *p = (unsigned char*)saved_key[i];

		while (*p)
			crc = jtr_crc32(crc, *p++);
		crypt_out[i] = crc;
	}
	return count;
}

/* Parse the 8 hex digits after the tag into the 32-bit target value. */
static void *get_binary(char *ciphertext)
{
	static uint32_t *out;

	if (!out)
		out = mem_alloc_tiny(sizeof(uint32_t), MEM_ALIGN_WORD);
	sscanf(&ciphertext[FORMAT_TAG_LEN], "%x", out);
	return out;
}

static char *get_key(int index)
{
	return saved_key[index];
}

#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"

struct fmt_main fmt_pst = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_TRUNC | FMT_8_BIT | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
__clang_cuda_complex_builtins.h
/*===-- __clang_cuda_complex_builtins - CUDA impls of runtime complex fns ---===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __CLANG_CUDA_COMPLEX_BUILTINS
#define __CLANG_CUDA_COMPLEX_BUILTINS

// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3.  These are
// libgcc functions that clang assumes are available when compiling c99 complex
// operations.  (These implementations come from libc++, and have been modified
// to work with CUDA and OpenMP target offloading [in C and C++ mode].)

#pragma push_macro("__DEVICE__")
#if defined(__OPENMP_NVPTX__) || defined(__OPENMP_AMDGCN__)
#pragma omp declare target
#define __DEVICE__ __attribute__((noinline, nothrow, cold, weak))
#else
#define __DEVICE__ __device__ inline
#endif

// To make the algorithms available for C and C++ in CUDA and OpenMP we select
// different but equivalent function versions. TODO: For OpenMP we currently
// select the native builtins as the overload support for templates is lacking.
#if !defined(__OPENMP_NVPTX__) && !defined(__OPENMP_AMDGCN__)
#define _ISNANd std::isnan
#define _ISNANf std::isnan
#define _ISINFd std::isinf
#define _ISINFf std::isinf
#define _ISFINITEd std::isfinite
#define _ISFINITEf std::isfinite
#define _COPYSIGNd std::copysign
#define _COPYSIGNf std::copysign
#define _SCALBNd std::scalbn
#define _SCALBNf std::scalbn
#define _ABSd std::abs
#define _ABSf std::abs
#define _LOGBd std::logb
#define _LOGBf std::logb
// Rather than pulling in std::max from algorithm everytime, use available ::max.
#define _fmaxd max
#define _fmaxf max
#else
#ifdef __AMDGCN__
#define _ISNANd __ocml_isnan_f64
#define _ISNANf __ocml_isnan_f32
#define _ISINFd __ocml_isinf_f64
#define _ISINFf __ocml_isinf_f32
#define _ISFINITEd __ocml_isfinite_f64
#define _ISFINITEf __ocml_isfinite_f32
#define _COPYSIGNd __ocml_copysign_f64
#define _COPYSIGNf __ocml_copysign_f32
#define _SCALBNd __ocml_scalbn_f64
#define _SCALBNf __ocml_scalbn_f32
#define _ABSd __ocml_fabs_f64
#define _ABSf __ocml_fabs_f32
#define _LOGBd __ocml_logb_f64
#define _LOGBf __ocml_logb_f32
#define _fmaxd __ocml_fmax_f64
#define _fmaxf __ocml_fmax_f32
#else
#define _ISNANd __nv_isnand
#define _ISNANf __nv_isnanf
#define _ISINFd __nv_isinfd
#define _ISINFf __nv_isinff
#define _ISFINITEd __nv_isfinited
#define _ISFINITEf __nv_finitef
#define _COPYSIGNd __nv_copysign
#define _COPYSIGNf __nv_copysignf
#define _SCALBNd __nv_scalbn
#define _SCALBNf __nv_scalbnf
#define _ABSd __nv_fabs
#define _ABSf __nv_fabsf
#define _LOGBd __nv_logb
#define _LOGBf __nv_logbf
#define _fmaxd __nv_fmax
#define _fmaxf __nv_fmaxf
#endif
#endif

#if defined(__cplusplus)
extern "C" {
#endif

// Complex multiply (double): naive product first, then — only if both parts
// came out NaN — the infinity-recovery fixup (cf. the C99 Annex G sample
// implementation) so that inf * finite etc. produce a correctly-signed inf.
__DEVICE__ double _Complex __muldc3(double __a, double __b, double __c,
                                    double __d) {
  double __ac = __a * __c;
  double __bd = __b * __d;
  double __ad = __a * __d;
  double __bc = __b * __c;
  double _Complex z;
  __real__(z) = __ac - __bd;
  __imag__(z) = __ad + __bc;
  if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
    int __recalc = 0;
    if (_ISINFd(__a) || _ISINFd(__b)) {
      // "box" the infinity: replace inf by +/-1 and NaN partners by +/-0
      __a = _COPYSIGNd(_ISINFd(__a) ? 1 : 0, __a);
      __b = _COPYSIGNd(_ISINFd(__b) ? 1 : 0, __b);
      if (_ISNANd(__c))
        __c = _COPYSIGNd(0, __c);
      if (_ISNANd(__d))
        __d = _COPYSIGNd(0, __d);
      __recalc = 1;
    }
    if (_ISINFd(__c) || _ISINFd(__d)) {
      __c = _COPYSIGNd(_ISINFd(__c) ? 1 : 0, __c);
      __d = _COPYSIGNd(_ISINFd(__d) ? 1 : 0, __d);
      if (_ISNANd(__a))
        __a = _COPYSIGNd(0, __a);
      if (_ISNANd(__b))
        __b = _COPYSIGNd(0, __b);
      __recalc = 1;
    }
    if (!__recalc && (_ISINFd(__ac) || _ISINFd(__bd) || _ISINFd(__ad) ||
                      _ISINFd(__bc))) {
      // intermediate product overflowed even though inputs were finite
      if (_ISNANd(__a))
        __a = _COPYSIGNd(0, __a);
      if (_ISNANd(__b))
        __b = _COPYSIGNd(0, __b);
      if (_ISNANd(__c))
        __c = _COPYSIGNd(0, __c);
      if (_ISNANd(__d))
        __d = _COPYSIGNd(0, __d);
      __recalc = 1;
    }
    if (__recalc) {
      // Can't use std::numeric_limits<double>::infinity() -- that doesn't have
      // a device overload (and isn't constexpr before C++11, naturally).
      __real__(z) = __builtin_huge_val() * (__a * __c - __b * __d);
      __imag__(z) = __builtin_huge_val() * (__a * __d + __b * __c);
    }
  }
  return z;
}

// Complex multiply (float): same structure as __muldc3.
__DEVICE__ float _Complex __mulsc3(float __a, float __b, float __c, float __d) {
  float __ac = __a * __c;
  float __bd = __b * __d;
  float __ad = __a * __d;
  float __bc = __b * __c;
  float _Complex z;
  __real__(z) = __ac - __bd;
  __imag__(z) = __ad + __bc;
  if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
    int __recalc = 0;
    if (_ISINFf(__a) || _ISINFf(__b)) {
      __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
      __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
      if (_ISNANf(__c))
        __c = _COPYSIGNf(0, __c);
      if (_ISNANf(__d))
        __d = _COPYSIGNf(0, __d);
      __recalc = 1;
    }
    if (_ISINFf(__c) || _ISINFf(__d)) {
      __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
      __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
      if (_ISNANf(__a))
        __a = _COPYSIGNf(0, __a);
      if (_ISNANf(__b))
        __b = _COPYSIGNf(0, __b);
      __recalc = 1;
    }
    if (!__recalc && (_ISINFf(__ac) || _ISINFf(__bd) || _ISINFf(__ad) ||
                      _ISINFf(__bc))) {
      if (_ISNANf(__a))
        __a = _COPYSIGNf(0, __a);
      if (_ISNANf(__b))
        __b = _COPYSIGNf(0, __b);
      if (_ISNANf(__c))
        __c = _COPYSIGNf(0, __c);
      if (_ISNANf(__d))
        __d = _COPYSIGNf(0, __d);
      __recalc = 1;
    }
    if (__recalc) {
      __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
      __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
    }
  }
  return z;
}

// Complex divide (double): scales the denominator by 2^-ilogbw before the
// Smith-style quotient to avoid overflow/underflow, then applies the Annex-G
// style special-case recovery when both result parts are NaN.
__DEVICE__ double _Complex __divdc3(double __a, double __b, double __c,
                                    double __d) {
  int __ilogbw = 0;
  // Can't use std::max, because that's defined in <algorithm>, and we don't
  // want to pull that in for every compile.  The CUDA headers define
  // ::max(float, float) and ::max(double, double), which is sufficient for us.
  double __logbw = _LOGBd(_fmaxd(_ABSd(__c), _ABSd(__d)));
  if (_ISFINITEd(__logbw)) {
    __ilogbw = (int)__logbw;
    __c = _SCALBNd(__c, -__ilogbw);
    __d = _SCALBNd(__d, -__ilogbw);
  }
  double __denom = __c * __c + __d * __d;
  double _Complex z;
  __real__(z) = _SCALBNd((__a * __c + __b * __d) / __denom, -__ilogbw);
  __imag__(z) = _SCALBNd((__b * __c - __a * __d) / __denom, -__ilogbw);
  if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
    if ((__denom == 0.0) && (!_ISNANd(__a) || !_ISNANd(__b))) {
      // nonzero / zero -> correctly-signed infinity
      __real__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __a;
      __imag__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __b;
    } else if ((_ISINFd(__a) || _ISINFd(__b)) && _ISFINITEd(__c) &&
               _ISFINITEd(__d)) {
      // infinite / finite -> infinity
      __a = _COPYSIGNd(_ISINFd(__a) ? 1.0 : 0.0, __a);
      __b = _COPYSIGNd(_ISINFd(__b) ? 1.0 : 0.0, __b);
      __real__(z) = __builtin_huge_val() * (__a * __c + __b * __d);
      __imag__(z) = __builtin_huge_val() * (__b * __c - __a * __d);
    } else if (_ISINFd(__logbw) && __logbw > 0.0 && _ISFINITEd(__a) &&
               _ISFINITEd(__b)) {
      // finite / infinite -> correctly-signed zero
      __c = _COPYSIGNd(_ISINFd(__c) ? 1.0 : 0.0, __c);
      __d = _COPYSIGNd(_ISINFd(__d) ? 1.0 : 0.0, __d);
      __real__(z) = 0.0 * (__a * __c + __b * __d);
      __imag__(z) = 0.0 * (__b * __c - __a * __d);
    }
  }
  return z;
}

// Complex divide (float): same structure as __divdc3.
__DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) {
  int __ilogbw = 0;
  float __logbw = _LOGBf(_fmaxf(_ABSf(__c), _ABSf(__d)));
  if (_ISFINITEf(__logbw)) {
    __ilogbw = (int)__logbw;
    __c = _SCALBNf(__c, -__ilogbw);
    __d = _SCALBNf(__d, -__ilogbw);
  }
  float __denom = __c * __c + __d * __d;
  float _Complex z;
  __real__(z) = _SCALBNf((__a * __c + __b * __d) / __denom, -__ilogbw);
  __imag__(z) = _SCALBNf((__b * __c - __a * __d) / __denom, -__ilogbw);
  if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
    if ((__denom == 0) && (!_ISNANf(__a) || !_ISNANf(__b))) {
      __real__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __a;
      __imag__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __b;
    } else if ((_ISINFf(__a) || _ISINFf(__b)) && _ISFINITEf(__c) &&
               _ISFINITEf(__d)) {
      __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
      __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
      __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
      __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
    } else if (_ISINFf(__logbw) && __logbw > 0 && _ISFINITEf(__a) &&
               _ISFINITEf(__b)) {
      __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
      __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
      __real__(z) = 0 * (__a * __c + __b * __d);
      __imag__(z) = 0 * (__b * __c - __a * __d);
    }
  }
  return z;
}

#if defined(__cplusplus)
} // extern "C"
#endif

#undef _ISNANd
#undef _ISNANf
#undef _ISINFd
#undef _ISINFf
#undef _COPYSIGNd
#undef _COPYSIGNf
#undef _ISFINITEd
#undef _ISFINITEf
#undef _SCALBNd
#undef _SCALBNf
#undef _ABSd
#undef _ABSf
#undef _LOGBd
#undef _LOGBf
#undef _fmaxd
#undef _fmaxf

#if defined(__OPENMP_NVPTX__) || defined(__OPENMP_AMDGCN__)
#pragma omp end declare target
#endif
#pragma pop_macro("__DEVICE__")

#endif // __CLANG_CUDA_COMPLEX_BUILTINS
ExtraRocksDBController.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#ifndef _SPTAG_SPANN_EXTRADBSEARCHER_H_
#define _SPTAG_SPANN_EXTRADBSEARCHER_H_

#include "inc/Helper/VectorSetReader.h"
#include "inc/Helper/AsyncFileReader.h"
#include "IExtraSearcher.h"
#include "ExtraFullGraphSearcher.h"
#include "../Common/TruthSet.h"
#include "inc/Helper/KeyValueIO.h"

#include "rocksdb/db.h"
#include "rocksdb/slice.h"
#include "rocksdb/options.h"
#include "rocksdb/merge_operator.h"

#include <map>
#include <cmath>
#include <climits>
#include <future>

namespace SPTAG::SPANN {
    // Orders selection edges by destination (full vector id); used to sort the
    // members of each posting list before serialization.
    inline bool sort_docid_cmp(const Edge& a, const Edge& b) {
        return a.tonode < b.tonode;
    }

    // RocksDB-backed key/value store used by SPFresh to persist posting lists.
    // Values are appended via an associative merge operator (byte concat).
    class RocksDBIO : public Helper::KeyValueIO {
    public:
        RocksDBIO() = default;

        // NOTE(review): the destructor and ShutDown() both Close/DestroyDB/delete.
        // If a caller invokes ShutDown() and then lets the object be destroyed,
        // db is used after delete — confirm callers use exactly one path.
        // DestroyDB also erases the on-disk database, so this store is
        // intentionally non-persistent across runs.
        ~RocksDBIO() override {
            db->Close();
            DestroyDB(dbPath, dbOptions);
            delete db;
        }

        // Opens (creating if missing) a RocksDB instance at filePath and installs
        // the AnnMergeOperator. Returns true on success.
        bool Initialize(const char* filePath) override {
            dbPath = std::string(filePath);
            dbOptions.create_if_missing = true;
            dbOptions.IncreaseParallelism();
            dbOptions.OptimizeLevelStyleCompaction();
            dbOptions.merge_operator.reset(new AnnMergeOperator);
            auto s = rocksdb::DB::Open(dbOptions, dbPath, &db);
            LOG(Helper::LogLevel::LL_Info, "SPFresh: New Rocksdb: %s\n", filePath);
            return s == rocksdb::Status::OK();
        }

        // Closes and destroys the database (see destructor NOTE above).
        void ShutDown() override {
            db->Close();
            DestroyDB(dbPath, dbOptions);
            delete db;
        }

        // Point lookup by raw string key.
        ErrorCode Get(const std::string& key, std::string* value) override {
            auto s = db->Get(rocksdb::ReadOptions(), key, value);
            if (s == rocksdb::Status::OK()) {
                return ErrorCode::Success;
            }
            else {
                return ErrorCode::Fail;
            }
        }

        // Point lookup by numeric key (serialized to the binary key form).
        ErrorCode Get(SizeType key, std::string* value) override {
            return Get(Helper::Convert::Serialize<SizeType>(&key), value);
        }

        // Stores value under the raw string key, replacing any prior value.
        ErrorCode Put(const std::string& key, const std::string& value) override {
            auto s = db->Put(rocksdb::WriteOptions(), key, value);
            if (s == rocksdb::Status::OK()) {
                return ErrorCode::Success;
            }
            else {
                return ErrorCode::Fail;
            }
        }

        // Stores value under a numeric key.
        ErrorCode Put(SizeType key, const std::string& value) override {
            return Put(Helper::Convert::Serialize<SizeType>(&key), value);
        }

        // Stores a single posting entry: vector id followed by the raw vector.
        ErrorCode Put(SizeType key, SizeType id, const void* vector, SizeType dim) override {
            using Helper::Convert::Serialize;
            std::string posting(Serialize<SizeType>(&id) + Serialize<SizeType>(vector, dim));
            return Put(key, posting);
        }

        // Associative merge operator: Merge(key, v) appends v to the existing
        // value (or starts a new one), so posting lists grow by concatenation.
        class AnnMergeOperator : public rocksdb::AssociativeMergeOperator {
        public:
            bool Merge(const rocksdb::Slice& key, const rocksdb::Slice* existing_value,
                const rocksdb::Slice& value, std::string* new_value,
                rocksdb::Logger* logger) const override {
                std::string newPosting;
                if (existing_value) {
                    newPosting += (*existing_value).ToString();
                    newPosting += value.ToString();
                }
                else {
                    newPosting += value.ToString();
                }
                *new_value = newPosting;
                return true;
            }
            const char* Name() const override {
                return "AnnMergeOperator";
            }
        };

        // Appends value to the posting stored under key via AnnMergeOperator.
        // NOTE(review): key is serialized with Serialize<int>(&key, 1) here (and
        // in Delete) but with Serialize<SizeType>(&key) in Get/Put — identical
        // only while SizeType is int; verify before changing SizeType.
        ErrorCode Merge(SizeType key, const std::string& value) {
            if (value.empty()) {
                LOG(Helper::LogLevel::LL_Error, "Error! empty append posting!\n");
            }
            auto s = db->Merge(rocksdb::WriteOptions(), Helper::Convert::Serialize<int>(&key, 1), value);
            if (s == rocksdb::Status::OK()) {
                return ErrorCode::Success;
            }
            else {
                return ErrorCode::Fail;
            }
        }

        // Removes the posting stored under key.
        ErrorCode Delete(SizeType key) override {
            auto s = db->Delete(rocksdb::WriteOptions(), Helper::Convert::Serialize<int>(&key, 1));
            if (s == rocksdb::Status::OK()) {
                return ErrorCode::Success;
            }
            else {
                return ErrorCode::Fail;
            }
        }

        // Full-range manual compaction; useful after bulk merges.
        void ForceCompaction() {
            LOG(Helper::LogLevel::LL_Info, "Start Compaction\n");
            db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);
            LOG(Helper::LogLevel::LL_Info, "Finish Compaction\n");
        }

    private:
        std::string dbPath;        // on-disk location; destroyed on shutdown
        rocksdb::DB* db{};         // owned raw handle (deleted in dtor/ShutDown)
        rocksdb::Options dbOptions;
    };

    // SSD-tier searcher that keeps posting lists in RocksDB instead of flat
    // files. Each posting entry is [int vectorID][ValueType vector[dim]], so
    // m_vectorInfoSize = dim * sizeof(ValueType) + sizeof(int).
    template <typename ValueType>
    class ExtraRocksDBController : public IExtraSearcher {
    private:
        RocksDBIO db;
        std::atomic_uint64_t m_postingNum{};  // number of postings currently stored

    public:
        ExtraRocksDBController(const char* dbPath, int dim) {
            db.Initialize(dbPath);
            m_vectorInfoSize = dim * sizeof(ValueType) + sizeof(int);
        }

        ~ExtraRocksDBController() override = default;

        // No-op for the RocksDB backend: postings are loaded lazily via Get.
        // The commented block below is the flat-file loader kept for reference.
        bool LoadIndex(Options& p_opt) override {
            /*
            m_extraFullGraphFile = p_opt.m_indexDirectory + FolderSep + p_opt.m_ssdIndex;
            std::string curFile = m_extraFullGraphFile;
            do {
                auto curIndexFile = f_createAsyncIO();
                if (curIndexFile == nullptr || !curIndexFile->Initialize(curFile.c_str(), std::ios::binary | std::ios::in,
#ifdef BATCH_READ
                    p_opt.m_searchInternalResultNum, 2, 2, p_opt.m_iSSDNumberOfThreads
#else
                    p_opt.m_searchInternalResultNum * p_opt.m_iSSDNumberOfThreads / p_opt.m_ioThreads + 1, 2, 2, p_opt.m_ioThreads
#endif
                )) {
                    LOG(Helper::LogLevel::LL_Error, "Cannot open file:%s!\n", curFile.c_str());
                    return false;
                }
                m_indexFiles.emplace_back(curIndexFile);
                m_listInfos.emplace_back(0);
                m_totalListCount += LoadingHeadInfo(curFile, p_opt.m_searchPostingPageLimit, m_listInfos.back());
                curFile = m_extraFullGraphFile + "_" + std::to_string(m_indexFiles.size());
            } while (fileexists(curFile.c_str()));
            m_listPerFile = static_cast<int>((m_totalListCount + m_indexFiles.size() - 1) / m_indexFiles.size());
#ifndef _MSC_VER
            Helper::AIOTimeout.tv_nsec = p_opt.m_iotimeout * 1000;
#endif
            */
            return true;
        }

        // Scans the posting lists named in p_exWorkSpace->m_postingIDs, skipping
        // deleted and already-seen vectors, and adds candidates to the result
        // set. When `truth` is given, records which truth ids each posting
        // contains into `found` (recall debugging). Stats are optional.
        virtual void SearchIndex(ExtraWorkSpace* p_exWorkSpace,
            QueryResult& p_queryResults,
            std::shared_ptr<VectorIndex> p_index,
            SearchStats* p_stats,
            const COMMON::Labelset& m_deletedID,
            std::set<int>* truth,
            std::map<int, std::set<int>>* found) override {
            const auto postingListCount = static_cast<uint32_t>(p_exWorkSpace->m_postingIDs.size());
            p_exWorkSpace->m_deduper.clear();
            COMMON::QueryResultSet<ValueType>& queryResults = *((COMMON::QueryResultSet<ValueType>*)&p_queryResults);
            int diskRead = 0;
            int diskIO = 0;
            int listElements = 0;
            for (uint32_t pi = 0; pi < postingListCount; ++pi) {
                auto curPostingID = p_exWorkSpace->m_postingIDs[pi];
                std::string postingList;
                // One RocksDB Get per posting list.
                SearchIndex(curPostingID, postingList);
                int vectorNum = postingList.size() / m_vectorInfoSize;
                diskIO++;
                diskRead++;
                listElements += vectorNum;
                for (int i = 0; i < vectorNum; i++) {
                    // Entry layout: [int vectorID][vector bytes].
                    char* vectorInfo = postingList.data() + i * m_vectorInfoSize;
                    int vectorID = *(reinterpret_cast<int*>(vectorInfo));
                    if (m_deletedID.Contains(vectorID) || p_exWorkSpace->m_deduper.CheckAndSet(vectorID)) continue;
                    auto distance2leaf = p_index->ComputeDistance(queryResults.GetQuantizedTarget(), vectorInfo + sizeof(int));
                    queryResults.AddPoint(vectorID, distance2leaf);
                }
                if (truth) {
                    for (int i = 0; i < vectorNum; ++i) {
                        char* vectorInfo = postingList.data() + i * m_vectorInfoSize;
                        int vectorID = *(reinterpret_cast<int*>(vectorInfo));
                        if (truth->count(vectorID) != 0) (*found)[curPostingID].insert(vectorID);
                    }
                }
            }
            if (p_stats) {
                p_stats->m_totalListElementsCount = listElements;
                p_stats->m_diskIOCount = diskIO;
                p_stats->m_diskAccessCount = diskRead;
            }
        }

        // Builds the SSD tier: assigns every full vector to up to m_replicaCount
        // head postings via ApproximateRNG (batched), trims each posting to the
        // page limit, writes the postings into RocksDB, emits the SSD info file
        // (counts + per-posting sizes) and an empty delete map.
        bool BuildIndex(std::shared_ptr<Helper::VectorSetReader>& p_reader,
            std::shared_ptr<VectorIndex> p_headIndex,
            Options& p_opt) override {
            std::string outputFile = p_opt.m_indexDirectory + FolderSep + p_opt.m_ssdIndex;
            if (outputFile.empty()) {
                LOG(Helper::LogLevel::LL_Error, "Output file can't be empty!\n");
                return false;
            }

            int numThreads = p_opt.m_iSSDNumberOfThreads;
            int candidateNum = p_opt.m_internalResultNum;

            // Head vector ids are excluded from replica assignment sampling.
            std::unordered_set<SizeType> headVectorIDS;
            if (p_opt.m_headIDFile.empty()) {
                LOG(Helper::LogLevel::LL_Error, "Not found VectorIDTranslate!\n");
                return false;
            }
            {
                auto ptr = SPTAG::f_createIO();
                if (ptr == nullptr || !ptr->Initialize((p_opt.m_indexDirectory + FolderSep + p_opt.m_headIDFile).c_str(), std::ios::binary | std::ios::in)) {
                    LOG(Helper::LogLevel::LL_Error, "failed open VectorIDTranslate: %s\n", p_opt.m_headIDFile.c_str());
                    return false;
                }
                std::uint64_t vid;
                while (ptr->ReadBinary(sizeof(vid), reinterpret_cast<char*>(&vid)) == sizeof(vid)) {
                    headVectorIDS.insert(static_cast<SizeType>(vid));
                }
                LOG(Helper::LogLevel::LL_Info, "Loaded %u Vector IDs\n", static_cast<uint32_t>(headVectorIDS.size()));
            }

            SizeType fullCount = 0;
            size_t vectorInfoSize = 0;
            {
                auto fullVectors = p_reader->GetVectorSet();
                fullCount = fullVectors->Count();
                vectorInfoSize = fullVectors->PerVectorDataSize() + sizeof(int);
            }

            // One candidate Edge slot per (vector, replica) pair.
            Selection selections(static_cast<size_t>(fullCount) * p_opt.m_replicaCount, p_opt.m_tmpdir);
            LOG(Helper::LogLevel::LL_Info, "Full vector count:%d Edge bytes:%llu selection size:%zu, capacity size:%zu\n", fullCount, sizeof(Edge), selections.m_selections.size(), selections.m_selections.capacity());
            std::vector<std::atomic_int> replicaCount(fullCount);
            std::vector<std::atomic_int> postingListSize(headVectorIDS.size());
            for (auto& pls : postingListSize) pls = 0;
            std::unordered_set<SizeType> emptySet;
            SizeType batchSize = (fullCount + p_opt.m_batches - 1) / p_opt.m_batches;

            auto t1 = std::chrono::high_resolution_clock::now();
            if (p_opt.m_batches > 1) selections.SaveBatch();
            {
                LOG(Helper::LogLevel::LL_Info, "Preparation done, start candidate searching.\n");
                SizeType sampleSize = p_opt.m_samples;
                std::vector<SizeType> samples(sampleSize, 0);
                for (int i = 0; i < p_opt.m_batches; i++) {
                    SizeType start = i * batchSize;
                    SizeType end = min(start + batchSize, fullCount);
                    auto fullVectors = p_reader->GetVectorSet(start, end);
                    if (p_opt.m_distCalcMethod == DistCalcMethod::Cosine && !p_reader->IsNormalized()) fullVectors->Normalize(p_opt.m_iSSDNumberOfThreads);

                    emptySet.clear();

                    // Sample non-head vectors to report head-index recall.
                    int sampleNum = 0;
                    for (int j = start; j < end && sampleNum < sampleSize; j++) {
                        if (headVectorIDS.count(j) == 0) samples[sampleNum++] = j - start;
                    }

                    float acc = 0;
#pragma omp parallel for schedule(dynamic)
                    for (int j = 0; j < sampleNum; j++) {
                        COMMON::Utils::atomic_float_add(&acc, COMMON::TruthSet::CalculateRecall(p_headIndex.get(), fullVectors->GetVector(samples[j]), candidateNum));
                    }
                    acc = acc / sampleNum;
                    LOG(Helper::LogLevel::LL_Info, "Batch %d vector(%d,%d) loaded with %d vectors (%zu) HeadIndex acc @%d:%f.\n", i, start, end, fullVectors->Count(), selections.m_selections.size(), candidateNum, acc);

                    p_headIndex->ApproximateRNG(fullVectors, emptySet, candidateNum, selections.m_selections.data(), p_opt.m_replicaCount, numThreads, p_opt.m_gpuSSDNumTrees, p_opt.m_gpuSSDLeafSize, p_opt.m_rngFactor, p_opt.m_numGPUs);

                    // Tally replicas per vector and sizes per posting; INT_MAX
                    // node marks an unused replica slot.
                    for (SizeType j = start; j < end; j++) {
                        replicaCount[j] = 0;
                        size_t vecOffset = j * (size_t)p_opt.m_replicaCount;
                        for (int resNum = 0; resNum < p_opt.m_replicaCount && selections[vecOffset + resNum].node != INT_MAX; resNum++) {
                            ++postingListSize[selections[vecOffset + resNum].node];
                            selections[vecOffset + resNum].tonode = j;
                            ++replicaCount[j];
                        }
                    }

                    if (p_opt.m_batches > 1) selections.SaveBatch();
                }
            }
            auto t2 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Searching replicas ended. Search Time: %.2lf mins\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()) / 60.0);

            if (p_opt.m_batches > 1) selections.LoadBatch(0, static_cast<size_t>(fullCount) * p_opt.m_replicaCount);

            // Sort results either in CPU or GPU
            VectorIndex::SortSelections(&selections.m_selections);

            auto t3 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Time to sort selections:%.2lf sec.\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t3 - t2).count()) + ((double)std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count()) / 1000);

            if (p_opt.m_postingPageLimit > 0) {
                m_postingSizeLimit = static_cast<int>(p_opt.m_postingPageLimit * PageSize / vectorInfoSize);
            }
            LOG(Helper::LogLevel::LL_Info, "Posting size limit: %d\n", m_postingSizeLimit);
            auto postingSizeLimit = m_postingSizeLimit;

            {
                std::vector<int> replicaCountDist(p_opt.m_replicaCount + 1, 0);
                for (int i = 0; i < replicaCount.size(); ++i) {
                    ++replicaCountDist[replicaCount[i]];
                }
                LOG(Helper::LogLevel::LL_Info, "Before Posting Cut:\n");
                for (int i = 0; i < replicaCountDist.size(); ++i) {
                    LOG(Helper::LogLevel::LL_Info, "Replica Count Dist: %d, %d\n", i, replicaCountDist[i]);
                }
            }

            // Posting cut: drop entries beyond the size limit (selections are
            // globally sorted, so each posting's entries are contiguous), then
            // sort surviving entries by doc id.
#pragma omp parallel for schedule(dynamic)
            for (int i = 0; i < postingListSize.size(); ++i) {
                std::size_t selectIdx = std::lower_bound(selections.m_selections.begin(), selections.m_selections.end(), i, Selection::g_edgeComparer) - selections.m_selections.begin();
                if (postingListSize[i] <= postingSizeLimit) {
                    std::sort(selections.m_selections.begin() + selectIdx, selections.m_selections.begin() + selectIdx + postingListSize[i], sort_docid_cmp);
                    continue;
                }
                for (size_t dropID = postingSizeLimit; dropID < postingListSize[i]; ++dropID) {
                    int tonode = selections.m_selections[selectIdx + dropID].tonode;
                    --replicaCount[tonode];
                }
                postingListSize[i] = postingSizeLimit;
                std::sort(selections.m_selections.begin() + selectIdx, selections.m_selections.begin() + selectIdx + postingListSize[i], sort_docid_cmp);
            }

            // Optionally dump ids of vectors with fewer than 2 replicas left.
            if (p_opt.m_outputEmptyReplicaID) {
                std::vector<int> replicaCountDist(p_opt.m_replicaCount + 1, 0);
                auto ptr = SPTAG::f_createIO();
                if (ptr == nullptr || !ptr->Initialize("EmptyReplicaID.bin", std::ios::binary | std::ios::out)) {
                    LOG(Helper::LogLevel::LL_Error, "Fail to create EmptyReplicaID.bin!\n");
                    return false;
                }
                for (int i = 0; i < replicaCount.size(); ++i) {
                    ++replicaCountDist[replicaCount[i]];
                    if (replicaCount[i] < 2) {
                        long long vid = i;
                        if (ptr->WriteBinary(sizeof(vid), reinterpret_cast<char*>(&vid)) != sizeof(vid)) {
                            LOG(Helper::LogLevel::LL_Error, "Failt to write EmptyReplicaID.bin!");
                            return false;
                        }
                    }
                }
                LOG(Helper::LogLevel::LL_Info, "After Posting Cut:\n");
                for (int i = 0; i < replicaCountDist.size(); ++i) {
                    LOG(Helper::LogLevel::LL_Info, "Replica Count Dist: %d, %d\n", i, replicaCountDist[i]);
                }
            }

            auto t4 = std::chrono::high_resolution_clock::now();
            LOG(SPTAG::Helper::LogLevel::LL_Info, "Time to perform posting cut:%.2lf sec.\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t4 - t3).count()) + ((double)std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count()) / 1000);

            if (p_opt.m_ssdIndexFileNum > 1) selections.SaveBatch();

            // Serialize each posting list ([id][vector] per entry) into RocksDB.
            auto fullVectors = p_reader->GetVectorSet();
            if (p_opt.m_distCalcMethod == DistCalcMethod::Cosine && !p_reader->IsNormalized()) fullVectors->Normalize(p_opt.m_iSSDNumberOfThreads);

            for (int id = 0; id < postingListSize.size(); id++) {
                std::string postinglist;
                std::size_t selectIdx = selections.lower_bound(id);
                for (int j = 0; j < postingListSize[id]; ++j) {
                    if (selections[selectIdx].node != id) {
                        LOG(Helper::LogLevel::LL_Error, "Selection ID NOT MATCH\n");
                        exit(1);
                    }
                    int fullID = selections[selectIdx++].tonode;
                    size_t dim = fullVectors->Dimension();
                    // First Vector ID, then Vector
                    postinglist += Helper::Convert::Serialize<int>(&fullID, 1);
                    postinglist += Helper::Convert::Serialize<ValueType>(fullVectors->GetVector(fullID), dim);
                }
                AddIndex(id, postinglist);
            }

            // SSD info file: [fullCount][postingCount][size per posting...].
            auto ptr = SPTAG::f_createIO();
            if (ptr == nullptr || !ptr->Initialize(p_opt.m_ssdInfoFile.c_str(), std::ios::binary | std::ios::out)) {
                LOG(Helper::LogLevel::LL_Error, "Failed open file %s\n", p_opt.m_ssdInfoFile.c_str());
                exit(1);
            }
            //Number of all documents.
            int i32Val = static_cast<int>(fullCount);
            if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndexInfo File!");
                exit(1);
            }
            //Number of postings
            i32Val = static_cast<int>(postingListSize.size());
            if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndexInfo File!");
                exit(1);
            }
            for (int id = 0; id < postingListSize.size(); id++) {
                i32Val = postingListSize[id].load();
                if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
                    LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndexInfo File!");
                    exit(1);
                }
            }

            // Fresh (all-clear) delete map for the new index.
            LOG(Helper::LogLevel::LL_Info, "SPFresh: initialize deleteMap\n");
            COMMON::Labelset m_deleteID;
            m_deleteID.Initialize(fullCount, p_headIndex->m_iDataBlockSize, p_headIndex->m_iDataCapacity);
            LOG(Helper::LogLevel::LL_Info, "SPFresh: save deleteMap\n");
            m_deleteID.Save(p_opt.m_fullDeletedIDFile);

            auto t5 = std::chrono::high_resolution_clock::now();
            double elapsedSeconds = std::chrono::duration_cast<std::chrono::seconds>(t5 - t1).count();
            LOG(Helper::LogLevel::LL_Info, "Total used time: %.2lf minutes (about %.2lf hours).\n", elapsedSeconds / 60.0, elapsedSeconds / 3600.0);
            return true;
        }

        // Appends a serialized batch of entries to headID's posting via merge.
        ErrorCode AppendPosting(SizeType headID, const std::string& appendPosting) override {
            if (appendPosting.empty()) {
                LOG(Helper::LogLevel::LL_Error, "Error! empty append posting!\n");
            }
            return db.Merge(headID, appendPosting);
        }

        void ForceCompaction() override { db.ForceCompaction(); }

        // Thin forwarding wrappers over the RocksDB store; m_postingNum tracks
        // the live posting count for GetIndexSize().
        inline ErrorCode SearchIndex(SizeType headID, std::string& posting) override { return db.Get(headID, &posting); }
        inline ErrorCode AddIndex(SizeType headID, const std::string& posting) override { m_postingNum++; return db.Put(headID, posting); }
        inline ErrorCode DeleteIndex(SizeType headID) override { m_postingNum--; return db.Delete(headID); }
        inline ErrorCode OverrideIndex(SizeType headID, const std::string& posting) override { return db.Put(headID, posting); }
        inline SizeType GetIndexSize() override { return m_postingNum; }
        inline SizeType GetPostingSizeLimit() override { return m_postingSizeLimit; }

    private:
        // On-disk layout descriptor for one posting list in the legacy
        // flat-file SSD index format (used only by LoadingHeadInfo).
        struct ListInfo {
            int listEleCount = 0;               // entries in the list (after clamping)
            std::uint16_t listPageCount = 0;    // pages the list occupies
            std::uint64_t listOffset = 0;       // byte offset of the list's first page
            std::uint16_t pageOffset = 0;       // offset of the list within its first page
        };

        // Parses the legacy flat-file header: global counts, then per-list
        // (pageNum, pageOffset, eleCount, pageCount). Clamps each list to
        // p_postingPageLimit pages. Returns the list count; exits on I/O error.
        int LoadingHeadInfo(const std::string& p_file, int p_postingPageLimit, std::vector<ListInfo>& m_listInfos) {
            auto ptr = SPTAG::f_createIO();
            if (ptr == nullptr || !ptr->Initialize(p_file.c_str(), std::ios::binary | std::ios::in)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to open file: %s\n", p_file.c_str());
                exit(1);
            }

            int m_listCount;
            int m_totalDocumentCount;
            int m_iDataDimension;
            int m_listPageOffset;

            if (ptr->ReadBinary(sizeof(m_listCount), reinterpret_cast<char*>(&m_listCount)) != sizeof(m_listCount)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                exit(1);
            }
            if (ptr->ReadBinary(sizeof(m_totalDocumentCount), reinterpret_cast<char*>(&m_totalDocumentCount)) != sizeof(m_totalDocumentCount)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                exit(1);
            }
            if (ptr->ReadBinary(sizeof(m_iDataDimension), reinterpret_cast<char*>(&m_iDataDimension)) != sizeof(m_iDataDimension)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                exit(1);
            }
            if (ptr->ReadBinary(sizeof(m_listPageOffset), reinterpret_cast<char*>(&m_listPageOffset)) != sizeof(m_listPageOffset)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                exit(1);
            }

            // The on-disk dimension must agree with this instance's ValueType.
            if (m_vectorInfoSize == 0) m_vectorInfoSize = m_iDataDimension * sizeof(ValueType) + sizeof(int);
            else if (m_vectorInfoSize != m_iDataDimension * sizeof(ValueType) + sizeof(int)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to read head info file! DataDimension and ValueType are not match!\n");
                exit(1);
            }

            m_listInfos.resize(m_listCount);

            size_t totalListElementCount = 0;

            std::map<int, int> pageCountDist;

            size_t biglistCount = 0;
            size_t biglistElementCount = 0;
            int pageNum;
            for (int i = 0; i < m_listCount; ++i) {
                if (ptr->ReadBinary(sizeof(pageNum), reinterpret_cast<char*>(&(pageNum))) != sizeof(pageNum)) {
                    LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                    exit(1);
                }
                if (ptr->ReadBinary(sizeof(m_listInfos[i].pageOffset), reinterpret_cast<char*>(&(m_listInfos[i].pageOffset))) != sizeof(m_listInfos[i].pageOffset)) {
                    LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                    exit(1);
                }
                if (ptr->ReadBinary(sizeof(m_listInfos[i].listEleCount), reinterpret_cast<char*>(&(m_listInfos[i].listEleCount))) != sizeof(m_listInfos[i].listEleCount)) {
                    LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                    exit(1);
                }
                if (ptr->ReadBinary(sizeof(m_listInfos[i].listPageCount), reinterpret_cast<char*>(&(m_listInfos[i].listPageCount))) != sizeof(m_listInfos[i].listPageCount)) {
                    LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                    exit(1);
                }
                m_listInfos[i].listOffset = (static_cast<uint64_t>(m_listPageOffset + pageNum) << PageSizeEx);
                // Clamp the element count to what fits in the page budget, then
                // recompute the page count for the clamped size.
                m_listInfos[i].listEleCount = min(m_listInfos[i].listEleCount, (min(static_cast<int>(m_listInfos[i].listPageCount), p_postingPageLimit) << PageSizeEx) / m_vectorInfoSize);
                m_listInfos[i].listPageCount = static_cast<std::uint16_t>(ceil((m_vectorInfoSize * m_listInfos[i].listEleCount + m_listInfos[i].pageOffset) * 1.0 / (1 << PageSizeEx)));
                totalListElementCount += m_listInfos[i].listEleCount;
                int pageCount = m_listInfos[i].listPageCount;

                if (pageCount > 1) {
                    ++biglistCount;
                    biglistElementCount += m_listInfos[i].listEleCount;
                }

                if (pageCountDist.count(pageCount) == 0) {
                    pageCountDist[pageCount] = 1;
                }
                else {
                    pageCountDist[pageCount] += 1;
                }
            }

            LOG(Helper::LogLevel::LL_Info,
                "Finish reading header info, list count %d, total doc count %d, dimension %d, list page offset %d.\n",
                m_listCount,
                m_totalDocumentCount,
                m_iDataDimension,
                m_listPageOffset);

            LOG(Helper::LogLevel::LL_Info,
                "Big page (>4K): list count %zu, total element count %zu.\n",
                biglistCount,
                biglistElementCount);

            LOG(Helper::LogLevel::LL_Info, "Total Element Count: %llu\n", totalListElementCount);

            for (auto& ele : pageCountDist) {
                LOG(Helper::LogLevel::LL_Info, "Page Count Dist: %d %d\n", ele.first, ele.second);
            }

            return m_listCount;
        }

    private:
        std::string m_extraFullGraphFile;   // legacy flat-file path (unused by RocksDB path)

        // std::vector<std::vector<ListInfo>> m_listInfos;

        std::vector<std::shared_ptr<Helper::DiskPriorityIO>> m_indexFiles;
        int m_vectorInfoSize = 0;           // bytes per posting entry: sizeof(int) + dim * sizeof(ValueType)

        // int m_totalListCount = 0;
        // int m_listPerFile = 0;

        int m_postingSizeLimit = INT_MAX;   // max entries per posting list
    };
} // namespace SPTAG::SPANN

#endif // _SPTAG_SPANN_EXTRADBSEARCHER_H_